package rio

import (
	"bytes"
	"fmt"
	"io"
	"os"
	"strings"

	harness "github.com/drone/go-convert/convert/harness/yaml"
	rio "github.com/drone/go-convert/convert/rio/yaml"
	"github.com/drone/go-convert/internal/store"
	"gopkg.in/yaml.v3"
)

// context stores a snapshot of the parsed Rio
// configuration while it is converted to a
// Harness pipeline.
type context struct {
	config *rio.Config
}

// Converter converts a Rio pipeline to a Harness
// v0 pipeline.
type Converter struct {
	kubeEnabled   bool
	kubeNamespace string
	kubeConnector string
	kubeOs        string
	dockerhubConn string
	identifiers   *store.Identifiers

	pipelineId   string
	pipelineName string
	pipelineOrg  string
	pipelineProj string
}

// New creates a new Converter that converts a Rio
// pipeline to a Harness v0 pipeline.
func New(options ...Option) *Converter {
	d := new(Converter)

	// create the unique identifier store. this store
	// is used for registering unique identifiers to
	// prevent duplicate names, unique index violations.
	d.identifiers = store.New()

	// loop through and apply the options.
	for _, option := range options {
		option(d)
	}

	// set the default kubernetes namespace.
	if d.kubeNamespace == "" {
		d.kubeNamespace = "default"
	}

	// set the default kubernetes OS.
	if d.kubeOs == "" {
		d.kubeOs = "Linux"
	}

	// set the runtime to kubernetes if the kubernetes
	// connector is configured.
	if d.kubeConnector != "" {
		d.kubeEnabled = true
	}

	// set the default docker connector.
	if d.dockerhubConn == "" {
		d.dockerhubConn = "account.harnessImage"
	}

	return d
}

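// Example usage (a minimal sketch; the file path is illustrative, and
// the Option values defined elsewhere in this package can be passed to
// New to override the defaults above):
//
//	converter := New()
//	out, err := converter.ConvertFile("rio.yaml")
//	if err != nil {
//		// handle the error
//	}
//	fmt.Println(string(out))
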
// Convert converts a Rio pipeline to a Harness v0 pipeline.
func (d *Converter) Convert(r io.Reader) ([]byte, error) {
	config, err := rio.Parse(r)
	if err != nil {
		return nil, err
	}
	return d.convert(&context{
		config: config,
	})
}

// ConvertBytes converts a Rio pipeline to a Harness v0 pipeline.
func (d *Converter) ConvertBytes(b []byte) ([]byte, error) {
	return d.Convert(
		bytes.NewBuffer(b),
	)
}

// ConvertString converts a Rio pipeline to a Harness v0 pipeline.
func (d *Converter) ConvertString(s string) ([]byte, error) {
	return d.Convert(
		bytes.NewBufferString(s),
	)
}

// ConvertFile converts a Rio pipeline to a Harness v0 pipeline.
func (d *Converter) ConvertFile(p string) ([]byte, error) {
	f, err := os.Open(p)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	return d.Convert(f)
}

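// convertRunStep converts the Rio build steps and machine settings
// into a single Harness Run step.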
func (d *Converter) convertRunStep(p rio.Pipeline) harness.StepRun {
	step := new(harness.StepRun)
	step.Env = p.Machine.Env
	command := ""
	for _, s := range p.Build.Steps {
		command += fmt.Sprintf("%s\n", s)
	}
	step.Command = command
	step.Shell = "Sh"
	step.ConnRef = d.dockerhubConn
	step.Image = p.Machine.BaseImage
	return *step
}

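// convertDockerSteps converts a Rio dockerfile entry into one Harness
// build-and-push step per publish repo. All steps share the same
// context, dockerfile path, build args, and version tag.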
func (d *Converter) convertDockerSteps(dockerfile rio.Dockerfile) []harness.StepDocker {
	steps := make([]harness.StepDocker, 0)
	for _, r := range dockerfile.Publish {
		step := new(harness.StepDocker)
		step.Context = dockerfile.Context
		step.Dockerfile = dockerfile.DockerfilePath
		step.Repo = r.Repo
		step.Tags = []string{dockerfile.Version}
		step.BuildsArgs = dockerfile.Env
		step.ConnectorRef = d.dockerhubConn
		steps = append(steps, *step)
	}
	return steps
}

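// convertExecution converts a Rio pipeline into a Harness execution:
// a single Run step for the build commands (if any), followed by one
// docker build-and-push step per dockerfile entry and publish repo.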
func (d *Converter) convertExecution(p rio.Pipeline) harness.Execution {
	execution := harness.Execution{}

	executionSteps := make([]*harness.Steps, 0)
	// append all commands in the build attribute to one run step.
	if len(p.Build.Steps) != 0 {
		steps := harness.Steps{
			Step: &harness.Step{
				Name: "run",
				ID:   "run",
				Spec: d.convertRunStep(p),
				Type: "Run",
			},
		}
		executionSteps = append(executionSteps, &steps)
	}

	dockerStepCounter := 1
	if len(p.Pkg.Dockerfile) != 0 {
		if !p.Pkg.Release {
			fmt.Println("# [WARN]: release=false is not supported")
		}

		// each entry in the package.Dockerfile attribute is one build and push step.
		for _, dockerfile := range p.Pkg.Dockerfile {
			// each dockerfile entry can have multiple repos.
			dockerSteps := d.convertDockerSteps(dockerfile)

			// append all docker steps.
			for _, dStep := range dockerSteps {
				// copy the loop variable before taking its address so each
				// step spec points at its own value.
				dStep := dStep
				steps := harness.Steps{
					Step: &harness.Step{
						Name: fmt.Sprintf("docker_build_and_push_%d", dockerStepCounter),
						ID:   fmt.Sprintf("docker_build_and_push_%d", dockerStepCounter),
						Spec: &dStep,
						Type: "BuildAndPushDockerRegistry",
					},
				}
				executionSteps = append(executionSteps, &steps)
				dockerStepCounter++
			}
		}
	}
	execution.Steps = executionSteps
	return execution
}

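// convertCIStage converts a Rio pipeline into a Harness CI stage,
// attaching a KubernetesDirect infrastructure block when a kubernetes
// connector is configured.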
func (d *Converter) convertCIStage(p rio.Pipeline) harness.StageCI {
	stage := harness.StageCI{
		Execution: d.convertExecution(p),
	}
	if d.kubeEnabled {
		infra := harness.Infrastructure{
			Type: "KubernetesDirect",
			Spec: &harness.InfraSpec{
				Namespace:             d.kubeNamespace,
				Conn:                  d.kubeConnector,
				AutomountServiceToken: true,
				Os:                    d.kubeOs,
			},
		}
		stage.Infrastructure = &infra
	}
	return stage
}

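// convertNameToID converts a stage name into a Harness identifier by
// replacing spaces and dashes with underscores. For example,
// "build and push-image" becomes "build_and_push_image".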
func convertNameToID(name string) string {
	ID := strings.ReplaceAll(name, " ", "_")
	ID = strings.ReplaceAll(ID, "-", "_")
	return ID
}

// convert converts a Rio pipeline to a Harness v0 pipeline.
func (d *Converter) convert(ctx *context) ([]byte, error) {
	// create the harness pipeline spec.
	pipeline := &harness.Pipeline{}
	for _, p := range ctx.config.Pipelines {
		stage := harness.Stages{
			Stage: &harness.Stage{
				Name: p.Name,
				ID:   convertNameToID(p.Name),
				Spec: d.convertCIStage(p),
				Type: "CI",
			},
		}
		pipeline.Stages = append(pipeline.Stages, &stage)
	}
	pipeline.Name = d.pipelineName
	pipeline.ID = d.pipelineId
	pipeline.Org = d.pipelineOrg
	pipeline.Project = d.pipelineProj

	// create the harness pipeline resource.
	config := &harness.Config{Pipeline: *pipeline}

	// marshal the harness yaml.
	out, err := yaml.Marshal(config)
	if err != nil {
		return nil, err
	}

	return out, nil
}
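
// For orientation, the marshaled output looks roughly like the v0 yaml
// below. This is an illustrative sketch only; the exact keys are
// determined by the harness yaml package, and the values depend on the
// Rio input and the configured options:
//
//	pipeline:
//	  name: my-pipeline
//	  identifier: my_pipeline
//	  stages:
//	  - stage:
//	      name: build
//	      identifier: build
//	      type: CI
//	      spec:
//	        execution:
//	          steps:
//	          - step:
//	              name: run
//	              identifier: run
//	              type: Run
//	              spec:
//	                command: make build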