Browse Source

pipeline: Builder and stage implementation

Provides `PipelineBuilder` for reading `.pipeline/config.yaml` and
mapping user-defined pipelines, stages, and execution graphs to actual
Jenkins Pipeline stage definitions.

Provides `Pipeline` class that constructs a "stack" of `PipelineStage`
objects from the user-provided configs, each with its own `NodeContext`
for binding output values to names and consuming bound values from
previous stages.

Provides `PipelineStage` that contains core stage step implementations
based on the existing `service-pipeline` JJB job definition in
`integration/config`. A closure is returned by each stage for passing
off to Jenkins Pipeline stage definitions by the builder.

Steps have a fixed order within a given stage: build, run, publish,
deploy, exports. This allows for concise definition of a stage that
performs multiple steps, and deterministic behavior of default
configuration that references locally bound output values (e.g. the
default configuration of `image:` for a `publish: { type: image }`
publish entry is `${.imageID}`, referencing the image built in the
current stage's `build` step.) If the user needs to change ordering,
they can simply break the stage out into multiple stages.

See the `Pipeline` class for currently supported configuration. Note
that the aforementioned context system allows users to make use of
the same value bindings that step implementations use internally. They
can also use the `exports` configuration field to bind new values.

To illustrate the minimally required configuration, the following would
approximate the current `service-pipeline-test-and-publish` JJB job for
a project named "foo".

    pipelines:
      foo:
        directory: src/foo
        stages:
          - name: test           # builds/runs "test" variant
          - name: candidate
            build: production
            publish:
              image: true
            deploy:              # currently only the "ci" cluster
              chart: https://releases.wikimedia.org/charts/foo-0.0.1.tgz
              test: true

And to illustrate how the "candidate" stage in this example could be
expressed as multiple stages using references to the output names that
steps bind/export:

    pipelines:
      foo:
        directory: src/foo
        stages:
          - name: tested
          - name: built
            build: production
          - name: published
            publish:
              image:
                id: '${built.imageID}'
            exports:
              image: '${.imageFullName}:${.imageTag}'
          - name: staged
            deploy:
              image: '${published.image}'
              chart: https://releases.wikimedia.org/charts/foo-0.0.1.tgz
              test: true

Bug: T210267
Change-Id: I5a41d0d33ed7e9174db6178ab7921f5143296c75
Dan Duvall 5 months ago
parent
commit
d5fedb1206

+ 3
- 0
.gitignore View File

@@ -1,2 +1,5 @@
1 1
 /.gradle
2
+/.idea
3
+/.project
2 4
 /build
5
+pipelinelib.iml

+ 8
- 0
Makefile View File

@@ -7,6 +7,14 @@ DOCKER_TAG := piplinelib-tests-$(shell date -I)
7 7
 
8 8
 .PHONY: test
9 9
 
10
+clean:
11
+ifneq (,$(DOCKER))
12
+	$(DOCKER_STOP_ALL) 2> /dev/null || true
13
+	$(DOCKER_RMI) 2> /dev/null || true
14
+else
15
+	@echo "Not using Docker. Nothing to do."
16
+endif
17
+
10 18
 doc: docs
11 19
 docs:
12 20
 	gradle groovydoc

+ 178
- 0
src/org/wikimedia/integration/Pipeline.groovy View File

@@ -0,0 +1,178 @@
1
+package org.wikimedia.integration
2
+
3
+import org.codehaus.groovy.GroovyException
4
+
5
+import static org.wikimedia.integration.PipelineStage.*
6
+
7
+import org.wikimedia.integration.ExecutionContext
8
+import org.wikimedia.integration.ExecutionGraph
9
+import org.wikimedia.integration.PipelineStage
10
+import org.wikimedia.integration.PipelineRunner
11
+
12
+/**
13
+ * Defines a Jenkins Workflow based on a given configuration.
14
+ *
15
+ * The given configuration should look like this:
16
+ *
17
+ * <pre><code>
18
+ * pipelines:
19
+ *   serviceOne:
20
+ *     blubberfile: serviceOne/blubber.yaml           # default based on service name for the dir
21
+ *     directory: src/serviceOne
22
+ *     execution:                                     # directed graph of stages to run
23
+ *       - [unit, candidate]                          # each arc is represented horizontally
24
+ *       - [lint, candidate]
25
+ *       - [candidate, staging, production]           # common segments of arcs can be defined separately too
26
+ *     stages:                                        # stage definitions
27
+ *       - name: unit                                 # stage name (required)
28
+ *         build: phpunit                             # build an image variant
29
+ *         run: "${.imageID}"                         # run the built image
30
+ *         publish:
31
+ *           files:                                   # publish select artifact files from the built/run image
32
+ *             paths: ["foo/*", "bar"]                # copy files {foo/*,bar} from the image fs to ./artifacts/{foo/*,bar}
33
+ *       - name: lint                                 # default (build/run "lint" variant, no artifacts, etc.)
34
+ *       - name: candidate
35
+ *         build: production
36
+ *         publish:
37
+ *           image:                                   # publish built image to our docker registry
38
+ *             id: "${.imageID}"                      # image reference
39
+ *             name: "${setup.project}"               # image name
40
+ *             tag: "${setup.timestamp}-${.stage}"    # primary tag
41
+ *             tags: [candidate]                      # additional tags
42
+ *         exports:                                   # export stage values under new names
43
+ *           image: "${.imageFullName}:${.imageTag}"  # new variable name and interpolated value
44
+ *       - name: staging
45
+ *         deploy:                                    # deploy image to a cluster
46
+ *           image: "${candidate.image}"              # image name:tag reference
47
+ *           cluster: ci                              # default "ci" k8s cluster
48
+ *           chart: http://helm/chart                 # helm chart to use for deployment
49
+ *           test: true                               # run `helm test` on deployment
50
+ *       - name: production
51
+ *         deploy:
52
+ *           cluster: production
53
+ *           chart: http://helm/chart
54
+ *   serviceTwo:
55
+ *     directory: src/serviceTwo
56
+ * </code></pre>
57
+ */
58
+class Pipeline implements Serializable {
59
+  String name
60
+  String blubberfile
61
+  String directory
62
+  String dockerRegistry
63
+
64
+  private Map stagesConfig
65
+  private List<List> execution
66
+
67
+  /**
68
+   * Constructs a new pipeline with the given name and configuration.
69
+   */
70
+  Pipeline(String pipelineName, Map config) {
71
+    name = pipelineName
72
+    blubberfile = config.blubberfile ?: "${name}/blubber.yaml"
73
+    directory = config.directory ?: "."
74
+    dockerRegistry = config.dockerRegistry
75
+
76
+    stagesConfig = config.stages.collectEntries{
77
+      [(it.name): PipelineStage.defaultConfig(it)]
78
+    }
79
+
80
+    execution = config.execution ?: [config.stages.collect { it.name }]
81
+  }
82
+
83
+  /**
84
+   * Returns a set of node labels that will be required for this pipeline to
85
+   * function correctly.
86
+   */
87
+  Set getRequiredNodeLabels() {
88
+    def labels = [] as Set
89
+
90
+    for (def nodes in stack()) {
91
+      for (def node in nodes) {
92
+        labels += node.getRequiredNodeLabels()
93
+      }
94
+    }
95
+
96
+    labels
97
+  }
98
+
99
+  /**
100
+   * Returns the pipeline's stage stack bound with an execution context.
101
+   */
102
+  List stack() {
103
+    def graph = setup() + (new ExecutionGraph(execution)) + teardown()
104
+    def context = new ExecutionContext(graph)
105
+
106
+    graph.stack().collect {
107
+      it.collect { stageName ->
108
+        createStage(stageName, context.ofNode(stageName))
109
+      }
110
+    }
111
+  }
112
+
113
+  /**
114
+   * Returns a {@link PipelineRunner} for this pipeline and the given workflow
115
+   * script object.
116
+   */
117
+  PipelineRunner runner(ws) {
118
+    def runner = new PipelineRunner(ws,
119
+      blubberConfig: blubberfile,
120
+      kubeConfig: "/etc/kubernetes/ci-staging.config",
121
+      registry: dockerRegistry,
122
+    )
123
+
124
+    // make the PipelineRunner configPath relative to the pipeline's directory
125
+    def prefix = "../" * directory.split('/').count { !(it in ["", "."]) }
126
+    runner.configPath = prefix + runner.configPath
127
+
128
+    runner
129
+  }
130
+
131
+  /**
132
+   * Validates the pipeline configuration, throwing a {@link ValidationException}
133
+   * if anything is amiss.
134
+   */
135
+  void validate() throws ValidationException {
136
+    def errors = []
137
+
138
+    // TODO expand validation
139
+    if (PipelineStage.SETUP in stagesConfig) {
140
+      errors += "${PipelineStage.SETUP} is a reserved stage name"
141
+    }
142
+
143
+    if (PipelineStage.TEARDOWN in stagesConfig) {
144
+      errors += "${PipelineStage.TEARDOWN} is a reserved stage name"
145
+    }
146
+
147
+    if (errors) {
148
+      throw new ValidationException(errors: errors)
149
+    }
150
+  }
151
+
152
+  private ExecutionGraph setup() {
153
+    new ExecutionGraph([[SETUP_STAGE]])
154
+  }
155
+
156
+  private ExecutionGraph teardown() {
157
+    new ExecutionGraph([[TEARDOWN_STAGE]])
158
+  }
159
+
160
+  private PipelineStage createStage(stageName, context) {
161
+    new PipelineStage(
162
+      this,
163
+      stageName,
164
+      stagesConfig[stageName] ? stagesConfig[stageName] : [:],
165
+      context,
166
+    )
167
+  }
168
+
169
+  class ValidationException extends GroovyException {
170
+    def errors
171
+
172
+    String getMessage() {
173
+      def msgs = errors.collect { " - ${it}" }.join("\n")
174
+
175
+      "Pipeline configuration validation failed:\n${msgs}"
176
+    }
177
+  }
178
+}

+ 83
- 0
src/org/wikimedia/integration/PipelineBuilder.groovy View File

@@ -0,0 +1,83 @@
1
+package org.wikimedia.integration
2
+
3
+import org.wikimedia.integration.ExecutionGraph
4
+import org.wikimedia.integration.Pipeline
5
+
6
+class PipelineBuilder implements Serializable {
7
+  String configPath
8
+
9
+  /**
10
+   * Constructs a new {@PipelineBuilder} from the given YAML configuration.
11
+   */
12
+  PipelineBuilder(String pipelineConfigPath) {
13
+    configPath = pipelineConfigPath
14
+  }
15
+
16
+  /**
17
+   * Builds a single-node Jenkins workflow script for each of the configured
18
+   * pipelines.
19
+   *
20
+   * If a pipeline defines any branching arcs in its directed
21
+   * <code>execution</code> graph, they will be iterated over concurrently—in
22
+   * the order that {@link ExecutionGraph#executions()} returns—and their
23
+   * stages defined as <code>parallel</code> stages in the workflow script.
24
+   *
25
+   * @param ws Jenkins Workflow Script (`this` when writing a Jenkinsfile)
26
+   */
27
+  void build(ws) {
28
+    def config
29
+
30
+    ws.node {
31
+      ws.stage("configure") {
32
+        ws.checkout(ws.scm)
33
+        config = ws.readYaml(file: configPath)
34
+      }
35
+    }
36
+
37
+    for (def pline in pipelines(config)) {
38
+      def stack = pline.stack()
39
+
40
+      ws.node(pline.getRequiredNodeLabels().join(" && ")) {
41
+        try {
42
+          for (def stages in stack) {
43
+            if (stages.size() > 1) {
44
+              def stageClosures = [:]
45
+              for (def stage in stages) {
46
+                stageClosures[stage.name] = stage.closure(ws)
47
+              }
48
+
49
+              ws.stage("${pline.name}: [parallel]") {
50
+                ws.parallel(stageClosures)
51
+              }
52
+            } else {
53
+              def stage = stages[0]
54
+              ws.stage("${pline.name}: ${stage.name}", stage.closure(ws))
55
+            }
56
+          }
57
+        } catch (exception) {
58
+          ws.currentBuild.result = 'FAILURE'
59
+
60
+          // ensure teardown steps are always executed
61
+          for (def stage in stack.last()) {
62
+            if (stage == "_teardown") {
63
+              stage.closure(ws)()
64
+            }
65
+          }
66
+
67
+          throw exception
68
+        }
69
+      }
70
+    }
71
+  }
72
+
73
+  /**
74
+   * Constructs and returns all pipelines from the given configuration.
75
+   */
76
+  List pipelines(cfg) {
77
+    cfg.pipelines.collect { pname, pconfig ->
78
+      def pline = new Pipeline(pname, pconfig)
79
+      pline.validate()
80
+      pline
81
+    }
82
+  }
83
+}

+ 83
- 6
src/org/wikimedia/integration/PipelineRunner.groovy View File

@@ -4,6 +4,9 @@ import java.io.FileNotFoundException
4 4
 
5 5
 import static org.wikimedia.integration.Utility.*
6 6
 
7
+import org.wikimedia.integration.GerritPipelineComment
8
+import org.wikimedia.integration.GerritReview
9
+
7 10
 /**
8 11
  * Provides an interface to common pipeline build/run/deploy functions.
9 12
  *
@@ -101,7 +104,7 @@ class PipelineRunner implements Serializable {
101 104
     }
102 105
 
103 106
     def blubber = new Blubber(workflowScript, cfg, blubberoidURL)
104
-    def dockerfile = getConfigFile("Dockerfile")
107
+    def dockerfile = getTempFile("Dockerfile.")
105 108
 
106 109
     workflowScript.writeFile(text: blubber.generateDockerfile(variant), file: dockerfile)
107 110
 
@@ -127,6 +130,19 @@ class PipelineRunner implements Serializable {
127 130
 
128 131
     assert cfg instanceof Map && cfg.chart : "you must define 'chart: <helm chart url>' in ${cfg}"
129 132
 
133
+    deployWithChart(cfg.chart, imageName, imageTag, overrides)
134
+  }
135
+
136
+  /**
137
+   * Deploys the given registered image using the given Helm chart and returns
138
+   * the name of the release.
139
+   *
140
+   * @param chart Chart URL.
141
+   * @param imageName Name of the registered image to deploy.
142
+   * @param imageTag  Tag of the registered image to use.
143
+   * @param overrides Additional Helm value overrides to set.
144
+   */
145
+  String deployWithChart(String chart, String imageName, String imageTag, Map overrides = [:]) {
130 146
     def values = [
131 147
       "docker.registry": registry,
132 148
       "docker.pull_policy": pullPolicy,
@@ -139,7 +155,7 @@ class PipelineRunner implements Serializable {
139 155
     def release = imageName + "-" + randomAlphanum(8)
140 156
 
141 157
     helm("install --namespace=${arg(namespace)} --set ${values} -n ${arg(release)} " +
142
-         "--debug --wait --timeout ${timeout} ${arg(cfg.chart)}")
158
+         "--debug --wait --timeout ${timeout} ${arg(chart)}")
143 159
 
144 160
     release
145 161
   }
@@ -154,12 +170,32 @@ class PipelineRunner implements Serializable {
154 170
   }
155 171
 
156 172
   /**
173
+   * Returns a path under configPath to a temp file with the given base name.
174
+   *
175
+   * @param baseName File base name.
176
+   */
177
+  String getTempFile(String baseName) {
178
+    getConfigFile("${baseName}${randomAlphanum(8)}")
179
+  }
180
+
181
+  /**
157 182
    * Deletes and purges the given Helm release.
158 183
    *
159 184
    * @param release Previously deployed release name.
160 185
    */
161 186
   void purgeRelease(String release) {
162
-    helm("delete --purge ${arg(release)}")
187
+    purgeReleases([release])
188
+  }
189
+
190
+  /**
191
+   * Deletes and purges the given Helm release.
192
+   *
193
+   * @param release Previously deployed release name.
194
+   */
195
+  void purgeReleases(List releases) {
196
+    if (releases.size() > 0) {
197
+      helm("delete --purge ${args(releases)}")
198
+    }
163 199
   }
164 200
 
165 201
   /**
@@ -194,17 +230,58 @@ class PipelineRunner implements Serializable {
194 230
    * @param imageID ID of the image to remove.
195 231
    */
196 232
   void removeImage(String imageID) {
197
-    workflowScript.sh("docker rmi --force ${arg(imageID)}")
233
+    removeImages([imageID])
234
+  }
235
+
236
+  /**
237
+   * Removes the given images from the local cache.
238
+   *
239
+   * @param imageIDs IDs of images to remove.
240
+   */
241
+  void removeImages(List imageIDs) {
242
+    if (imageIDs.size() > 0) {
243
+      workflowScript.sh("docker rmi --force ${args(imageIDs)}")
244
+    }
245
+  }
246
+
247
+  /**
248
+   * Submits a comment to Gerrit with the build result and links to published
249
+   * images.
250
+   *
251
+   * @param imageName Fully qualified name of published image.
252
+   * @param imageTags Image tags.
253
+   */
254
+  void reportToGerrit(imageName, imageTags = []) {
255
+    def comment
256
+
257
+    if (workflowScript.currentBuild.result == 'SUCCESS' && imageName) {
258
+      comment = new GerritPipelineComment(
259
+        jobName: workflowScript.env.JOB_NAME,
260
+        buildNumber: workflowScript.env.BUILD_NUMBER,
261
+        jobStatus: workflowScript.currentBuild.result,
262
+        image: imageName,
263
+        tags: imageTags,
264
+      )
265
+    } else {
266
+      comment = new GerritPipelineComment(
267
+        jobName: workflowScript.env.JOB_NAME,
268
+        buildNumber: workflowScript.env.BUILD_NUMBER,
269
+        jobStatus: workflowScript.currentBuild.result,
270
+      )
271
+    }
272
+
273
+    GerritReview.post(workflowScript, comment)
198 274
   }
199 275
 
200 276
   /**
201 277
    * Runs a container using the image specified by the given ID.
202 278
    *
203 279
    * @param imageID Image ID.
280
+   * @param arguments Entry-point arguments.
204 281
    */
205
-  void run(String imageID) {
282
+  void run(String imageID, List arguments = []) {
206 283
     workflowScript.timeout(time: 20, unit: "MINUTES") {
207
-      workflowScript.sh("exec docker run --rm ${arg(imageID)}")
284
+      workflowScript.sh("exec docker run --rm ${args([imageID] + arguments)}")
208 285
     }
209 286
   }
210 287
 

+ 525
- 0
src/org/wikimedia/integration/PipelineStage.groovy View File

@@ -0,0 +1,525 @@
1
+package org.wikimedia.integration
2
+
3
+import com.cloudbees.groovy.cps.NonCPS
4
+
5
+import static org.wikimedia.integration.Utility.timestampLabel
6
+
7
+import org.wikimedia.integration.ExecutionContext
8
+import org.wikimedia.integration.PatchSet
9
+import org.wikimedia.integration.Pipeline
10
+
11
+class PipelineStage implements Serializable {
12
+  static final String SETUP = 'setup'
13
+  static final String TEARDOWN = 'teardown'
14
+  static final List STEPS = ['build', 'run', 'publish', 'deploy', 'exports']
15
+
16
+
17
+  Pipeline pipeline
18
+  String name
19
+  Map config
20
+
21
+  private ExecutionContext.NodeContext context
22
+
23
+  /**
24
+   * Returns a config based on the given one but with default values
25
+   * inserted.
26
+   *
27
+   * @example Shorthand stage config (providing only a stage name)
28
+   * <pre><code>
29
+   *   def cfg = [name: "foo"]
30
+   *
31
+   *   assert PipelineStage.defaultConfig(cfg) == [
32
+   *     name: "foo",
33
+   *     build: '${.stage}',     // builds a variant by the same name
34
+   *     run: [
35
+   *       image: '${.imageID}', // runs the variant built by this stage
36
+   *       arguments: [],
37
+   *     ],
38
+   *   ]
39
+   * </code></pre>
40
+   *
41
+   * @example Configuring `run: true` means run the variant built by this
42
+   * stage
43
+   * <pre><code>
44
+   *   def cfg = [name: "foo", build: "foo", run: true]
45
+   *
46
+   *   assert PipelineStage.defaultConfig(cfg) == [
47
+   *     name: "foo",
48
+   *     build: "foo",
49
+   *     run: [
50
+   *       image: '${.imageID}', // runs the variant built by this stage
51
+   *       arguments: [],
52
+   *     ],
53
+   *   ]
54
+   * </code></pre>
55
+   *
56
+   * @example Publish image default configuration
57
+   * <pre><code>
58
+   *   def cfg = [image: true]
59
+   *   def defaults = PipelineStage.defaultConfig(cfg)
60
+   *
61
+   *   // publish.image.id defaults to the previously built image
62
+   *   assert defaults.publish.image.id == '${.imageID}'
63
+   *
64
+   *   // publish.image.name defaults to the project name
65
+   *   assert defaults.publish.image.name == '${setup.project}'
66
+   *
67
+   *   // publish.image.tag defaults to {timestamp}-{stage name}
68
+   *   assert defaults.publish.image.tag == '${setup.timestamp}-${.stage}'
69
+   * </code></pre>
70
+   */
71
+  @NonCPS
72
+  static Map defaultConfig(Map cfg) {
73
+    Map dcfg
74
+
75
+    // shorthand with just name is: build and run a variant
76
+    if (cfg.size() == 1 && cfg["name"]) {
77
+      dcfg = cfg + [
78
+        build: '${.stage}',
79
+        run: [
80
+          image: '${.imageID}',
81
+        ]
82
+      ]
83
+    } else {
84
+      dcfg = cfg.clone()
85
+    }
86
+
87
+    if (dcfg.run) {
88
+      // run: true means run the built image
89
+      if (dcfg.run == true) {
90
+        dcfg.run = [
91
+          image: '${.imageID}',
92
+        ]
93
+      } else {
94
+        dcfg.run = dcfg.run.clone()
95
+      }
96
+
97
+      // run.image defaults to previously built image
98
+      dcfg.run.image = dcfg.run.image ?: '${.imageID}'
99
+
100
+      // run.arguments defaults to []
101
+      dcfg.run.arguments = dcfg.run.arguments ?: []
102
+    }
103
+
104
+    if (dcfg.publish) {
105
+      def pcfg = dcfg.publish.clone()
106
+
107
+      if (pcfg.image) {
108
+        if (pcfg.image == true) {
109
+          pcfg.image = [:]
110
+        } else {
111
+          pcfg.image = pcfg.image.clone()
112
+        }
113
+
114
+        // publish.image.id defaults to the previously built image
115
+        pcfg.image.id = pcfg.image.id ?: '${.imageID}'
116
+
117
+        // publish.image.name defaults to the project name
118
+        pcfg.image.name = pcfg.image.name ?: "\${${SETUP}.project}"
119
+
120
+        // publish.image.tag defaults to {timestamp}-{stage name}
121
+        pcfg.image.tag = pcfg.image.tag ?: "\${${SETUP}.timestamp}-\${.stage}"
122
+
123
+        pcfg.image.tags = (pcfg.image.tags ?: []).clone()
124
+      }
125
+
126
+      if (pcfg.files) {
127
+        pcfg.files.paths = pcfg.files.paths.clone()
128
+      }
129
+
130
+      dcfg.publish = pcfg
131
+    }
132
+
133
+    if (dcfg.deploy) {
134
+      dcfg.deploy = dcfg.deploy.clone()
135
+
136
+      dcfg.deploy.image = dcfg.deploy.image ?: '${.publishedImage}'
137
+      dcfg.deploy.cluster = dcfg.deploy.cluster ?: "ci"
138
+      dcfg.deploy.test = dcfg.deploy.test == null ? true : dcfg.test
139
+    }
140
+
141
+    dcfg
142
+  }
143
+
144
+  PipelineStage(Pipeline pline, String stageName, Map stageConfig, nodeContext) {
145
+    pipeline = pline
146
+    name = stageName
147
+    config = stageConfig
148
+    context = nodeContext
149
+  }
150
+
151
+  /**
152
+   * Constructs and returns a closure for this pipeline stage using the given
153
+   * Jenkins workflow script object.
154
+   */
155
+  Closure closure(ws) {
156
+    ({
157
+      def runner = pipeline.runner(ws)
158
+
159
+      context["stage"] = name
160
+
161
+      switch (name) {
162
+      case SETUP:
163
+        setup(ws, runner)
164
+        break
165
+      case TEARDOWN:
166
+        teardown(ws, runner)
167
+        break
168
+      default:
169
+        ws.echo("running steps in ${pipeline.directory} with config: ${config.inspect()}")
170
+
171
+        ws.dir(pipeline.directory) {
172
+          for (def stageStep in STEPS) {
173
+            if (config[stageStep]) {
174
+              ws.echo("step: ${stageStep}")
175
+              this."${stageStep}"(ws, runner)
176
+            }
177
+          }
178
+        }
179
+      }
180
+    })
181
+  }
182
+
183
+  /**
184
+   * Returns a set of node labels that will be required for this stage to
185
+   * function correctly.
186
+   */
187
+  Set getRequiredNodeLabels() {
188
+    def labels = [] as Set
189
+
190
+    if (config.build || config.run) {
191
+      labels.add("blubber")
192
+    }
193
+
194
+    if (config.publish) {
195
+      for (def publish in config.publish) {
196
+        if (publish.type == "files") {
197
+          labels.add("blubber")
198
+        } else if (publish.type == "image") {
199
+          labels.add("dockerPublish")
200
+        }
201
+      }
202
+    }
203
+
204
+    labels
205
+  }
206
+
207
+  /**
208
+   * Performs setup steps, checking out the repo and binding useful values to
209
+   * be used by all other stages (default image labels, project identifier,
210
+   * timestamp, etc).
211
+   *
212
+   * <h3>Exports</h3>
213
+   * <dl>
214
+   * <dt><code>${setup.project}</code></dt>
215
+   * <dd>ZUUL_PROJECT parameter value if getting a patchset from Zuul.</dd>
216
+   * <dd>Jenkins JOB_NAME value otherwise.</dd>
217
+   *
218
+   * <dt><code>${setup.timestamp}</code></dt>
219
+   * <dd>Timestamp at the start of pipeline execution. Used in image tags, etc.</dd>
220
+   *
221
+   * <dt><code>${setup.imageLabels}</code></dt>
222
+   * <dd>Default set of image labels:
223
+   *    <code>jenkins.job</code>,
224
+   *    <code>jenkins.build</code>,
225
+   *    <code>ci.project</code>,
226
+   *    <code>ci.pipeline</code>
227
+   * </dd>
228
+   * </dl>
229
+   */
230
+  void setup(ws, runner) {
231
+    def imageLabels = [
232
+      "jenkins.job": ws.env.JOB_NAME,
233
+      "jenkins.build": ws.env.BUILD_ID,
234
+    ]
235
+
236
+    if (ws.params.ZUUL_REF) {
237
+      def patchset = PatchSet.fromZuul(ws.params)
238
+      ws.checkout(patchset.getSCM())
239
+      context["project"] = patchset.project.replaceAll('/', '-')
240
+      imageLabels["zuul.commit"] = patchset.commit
241
+    } else {
242
+      ws.checkout(ws.scm)
243
+      context["project"] = ws.env.JOB_NAME
244
+    }
245
+
246
+    imageLabels["ci.project"] = context['project']
247
+    imageLabels["ci.pipeline"] = pipeline.name
248
+
249
+    context["timestamp"] = timestampLabel()
250
+    context["imageLabels"] = imageLabels
251
+  }
252
+
253
+  /**
254
+   * Performs teardown steps, removing images and helm releases, and reporting
255
+   * back to Gerrit.
256
+   */
257
+  void teardown(ws, runner) {
258
+    try {
259
+      runner.removeImages(context.getAll("imageID"))
260
+    } catch (all) {}
261
+
262
+    try {
263
+      runner.purgeReleases(context.getAll("releaseName"))
264
+    } catch (all) {}
265
+
266
+    for (def imageName in context.getAll("publishedImage")) {
267
+      runner.reportToGerrit(image)
268
+    }
269
+  }
270
+
271
+  /**
272
+   * Builds the configured Blubber variant.
273
+   *
274
+   * <h3>Configuration</h3>
275
+   * <dl>
276
+   * <dt><code>build</code></dt>
277
+   * <dd>Blubber variant name</dd>
278
+   * </dl>
279
+   *
280
+   * <h3>Example</h3>
281
+   * <pre><code>
282
+   *   stages:
283
+   *     - name: candidate
284
+   *       build: production
285
+   * </code></pre>
286
+   *
287
+   * <h3>Exports</h3>
288
+   * <dl>
289
+   * <dt><code>${[stage].imageID}</code></dt>
290
+   * <dd>Image ID of built image.</dd>
291
+   * </dl>
292
+   */
293
+  void build(ws, runner) {
294
+    def imageID = runner.build(context % config.build, context["setup.imageLabels"])
295
+
296
+    context["imageID"] = imageID
297
+  }
298
+
299
+  /**
300
+   * Runs the entry point of a built image variant.
301
+   *
302
+   * <h3>Configuration</h3>
303
+   * <dl>
304
+   * <dt><code>run</code></dt>
305
+   * <dd>Image to run and entry-point arguments</dd>
306
+   * <dd>Specifying <code>run: true</code> expands to
307
+   *   <code>run: { image: '${.imageID}' }</code>
308
+   *   (i.e. the image built in this stage)</dd>
309
+   * <dd>
310
+   *   <dl>
311
+   *     <dt><code>image</code></dt>
312
+   *     <dd>An image to run</dd>
313
+   *     <dd>Default: <code>${.imageID}</code></dd>
314
+   *
315
+   *     <dt><code>arguments</code></dt>
316
+   *     <dd>Entry-point arguments</dd>
317
+   *     <dd>Default: <code>[]</code></dd>
318
+   *   </dl>
319
+   * </dd>
320
+   * </dl>
321
+   *
322
+   * <h3>Example</h3>
323
+   * <pre><code>
324
+   *   stages:
325
+   *     - name: test
326
+   *       build: test
327
+   *       run: true
328
+   * </code></pre>
329
+   *
330
+   * <h3>Example</h3>
331
+   * <pre><code>
332
+   *   stages:
333
+   *     - name: built
334
+   *     - name: lint
335
+   *       run:
336
+   *         image: '${built.imageID}'
337
+   *         arguments: [lint]
338
+   *     - name: test
339
+   *       run:
340
+   *         image: '${built.imageID}'
341
+   *         arguments: [test]
342
+   * </code></pre>
343
+   */
344
+  void run(ws, runner) {
345
+    runner.run(
346
+      context % config.run.image,
347
+      config.run.arguments.collect { context % it },
348
+    )
349
+  }
350
+
351
+  /**
352
+   * Publish artifacts, either files or a built image variant (pushed to the
353
+   * WMF Docker registry).
354
+   *
355
+   * <h3>Configuration</h3>
356
+   * <dl>
357
+   * <dt><code>publish</code></dt>
358
+   * <dd>
359
+   *   <dl>
360
+   *     <dt><code>image</code></dt>
361
+   *     <dd>Publish an image to the WMF Docker registry</dd>
362
+   *     <dd>
363
+   *       <dl>
364
+   *         <dt>id</dt>
365
+   *         <dd>ID of a previously built image variant</dd>
366
+   *         <dd>Default: <code>${.imageID}</code> (image built in this stage)</dd>
367
+   *
368
+   *         <dt>name</dt>
369
+   *         <dd>Published name of the image. Note that this base name will be
370
+   *         prefixed with the globally configured registry/repository name
371
+   *         before being pushed.</dd>
372
+   *         <dd>Default: <code>${setup.project}</code> (project identifier;
373
+   *         see {@link setup()})</dd>
374
+   *
375
+   *         <dt>tag</dt>
376
+   *         <dd>Primary tag under which the image is published</dd>
377
+   *         <dd>Default: <code>${setup.timestamp}-${.stage}</code></dd>
378
+   *
379
+   *         <dt>tags</dt>
380
+   *         <dd>Additional tags under which to publish the image</dd>
381
+   *       </dl>
382
+   *     </dd>
383
+   *   </dl>
384
+   * </dd>
385
+   * <dd>
386
+   *   <dl>
387
+   *     <dt><code>files</code></dt>
388
+   *     <dd>Extract and save files from a previously built image variant</dd>
389
+   *     <dd>
390
+   *       <dl>
391
+   *         <dt>paths</dt>
392
+   *         <dd>Globbed file paths resolving any number of files under the
393
+   *         image's root filesystem</dd>
394
+   *       </dl>
395
+   *     </dd>
396
+   *   </dl>
397
+   * </dd>
398
+   * </dl>
399
+   *
400
+   * <h3>Exports</h3>
401
+   * <dl>
402
+   * <dt><code>${[stage].imageName}</code></dt>
403
+   * <dd>Short name under which the image was published</dd>
404
+   *
405
+   * <dt><code>${[stage].imageFullName}</code></dt>
406
+   * <dd>Fully qualified name (registry/repository/imageName) under which the
407
+   * image was published</dd>
408
+   *
409
+   * <dt><code>${[stage].imageTag}</code></dt>
410
+   * <dd>Primary tag under which the image was published</dd>
411
+   *
412
+   * <dt><code>${[stage].publishedImage}</code></dt>
413
+   * <dd>Full qualified name and tag (<code>${.imageFullName}:${.imageTag}</code>)</dd>
414
+   * </dl>
415
+   */
416
+  void publish(ws, runner) {
417
+    if (config.publish.image) {
418
+      def imageName = context % publisher.name
419
+
420
+      for (def tag in ([publisher.tag] + publisher.tags)) {
421
+        runner.registerAs(
422
+          context % publisher.image,
423
+          imageName,
424
+          context % tag,
425
+        )
426
+      }
427
+
428
+      context["imageName"] = imageName
429
+      context["imageFullName"] = runner.qualifyRegistryPath(imageName)
430
+      context["imageTag"] = context % publisher.tag
431
+      context["publishedImage"] = context % '${.imageFullName}:${.imageTag}'
432
+    }
433
+
434
+    if (config.publish.files) {
435
+      // TODO
436
+    }
437
+  }
438
+
439
+  /**
440
+   * Deploy a published image to a WMF k8s cluster. (Currently only the "ci"
441
+   * cluster is supported for testing.)
442
+   *
443
+   * <h3>Configuration</h3>
444
+   * <dl>
445
+   * <dt><code>deploy</code></dt>
446
+   * <dd>
447
+   *   <dl>
448
+   *     <dt>image</dt>
449
+   *     <dd>Reference to a previously published image</dd>
450
+   *     <dd>Default: <code>${.publishedImage}</code> (image published in the
451
+   *     {@link publish() publish step} of this stage)</dd>
452
+   *
453
+   *     <dt>cluster</dt>
454
+   *     <dd>Cluster to target</dd>
455
+   *     <dd>Default: <code>"ci"</code></dd>
456
+   *     <dd>Currently only "ci" is supported and this configuration is
457
+   *     effectively ignored</dd>
458
+   *
459
+   *     <dt>chart</dt>
460
+   *     <dd>URL of Helm chart to use for deployment</dd>
461
+   *     <dd>Required</dd>
462
+   *
463
+   *     <dt>test</dt>
464
+   *     <dd>Whether to run <code>helm test</code> against this deployment</dd>
465
+   *     <dd>Default: <code>true</code></dd>
466
+   *   </dl>
467
+   * </dd>
468
+   * </dl>
469
+   *
470
+   * <h3>Exports</h3>
471
+   * <dl>
472
+   * <dt><code>${[stage].releaseName}</code></dt>
473
+   * <dd>Release name of new deployment</dd>
474
+   * </dl>
475
+   */
476
+  void deploy(ws, runner) {
477
+    def release = runner.deployWithChart(
478
+      context % config.deploy.chart,
479
+      context % config.deploy.image,
480
+      context % config.deploy.tag,
481
+    )
482
+
483
+    context["releaseName"] = release
484
+
485
+    if (config.deploy.test) {
486
+      runner.testRelease(release)
487
+    }
488
+  }
489
+
490
+  /**
491
+   * Binds a number of new values for reference in subsequent stages.
492
+   *
493
+   * <h3>Configuration</h3>
494
+   * <dl>
495
+   * <dt><code>exports</code></dt>
496
+   * <dd>Name/value pairs for additional exports.</dd>
497
+   * </dl>
498
+   *
499
+   * <h3>Example</h3>
500
+   * <pre><code>
501
+   *   stages:
502
+   *     - name: candidate
503
+   *       build: production
504
+   *       exports:
505
+   *         image: '${.imageID}'
506
+   *         tag: '${.imageTag}-my-tag'
507
+   *     - name: published
508
+   *       publish:
509
+   *         image:
510
+   *           id: '${candidate.image}'
511
+   *           tags: ['${candidate.tag}']
512
+   * </code></pre>
513
+   *
514
+   * <h3>Exports</h3>
515
+   * <dl>
516
+   * <dt><code>${[name].[value]}</code></dt>
517
+   * <dd>Each configured name/value pair.</dd>
518
+   * </dl>
519
+   */
520
+  void exports(ws, runner) {
521
+    for (def name in exports) {
522
+      context[name] = context % exports[name]
523
+    }
524
+  }
525
+}

+ 17
- 0
src/org/wikimedia/integration/Utility.groovy View File

@@ -25,6 +25,16 @@ class Utility {
25 25
   }
26 26
 
27 27
   /**
28
+   * Quotes all given shell arguments.
29
+   *
30
+   * @param arguments Shell argument.
31
+   * @return Quoted shell arguments.
32
+   */
33
+  static String args(List arguments) {
34
+    arguments.collect { arg(it) }.join(" ")
35
+  }
36
+
37
+  /**
28 38
    * Returns a random alpha-numeric string that's length long.
29 39
    *
30 40
    * @param length Desired length of string.
@@ -32,4 +42,11 @@ class Utility {
32 42
   static String randomAlphanum(length) {
33 43
     (1..length).collect { alphanums[random.nextInt(alphanums.size())] }.join()
34 44
   }
45
+
46
+  /**
47
+   * Returns a timestamp suitable for use in image names, tags, etc.
48
+   */
49
+  static String timestampLabel() {
50
+    new Date().format("yyyy-MM-dd-HH-mmss", TimeZone.getTimeZone("UTC"))
51
+  }
35 52
 }

+ 9
- 2
test/org/wikimedia/integration/PipelineRunnerTest.groovy View File

@@ -35,6 +35,12 @@ class PipelineRunnerTest extends GroovyTestCase {
35 35
     assert pipeline.getConfigFile("bar") == "foo/bar"
36 36
   }
37 37
 
38
+  void testGetTempFile() {
39
+    def pipeline = new PipelineRunner(new WorkflowScript(), configPath: "foo")
40
+
41
+    assert pipeline.getTempFile("bar") ==~ /^foo\/bar[a-z0-9]+$/
42
+  }
43
+
38 44
   void testQualifyRegistryPath() {
39 45
     def pipeline = new PipelineRunner(new WorkflowScript())
40 46
 
@@ -85,12 +91,13 @@ class PipelineRunnerTest extends GroovyTestCase {
85 91
 
86 92
     mockWorkflow.demand.writeFile { args ->
87 93
       assert args.text == "BASE: foo\n"
88
-      assert args.file == ".pipeline/Dockerfile"
94
+      assert args.file ==~ /^\.pipeline\/Dockerfile\.[a-z0-9]+$/
89 95
     }
90 96
 
91 97
     mockWorkflow.demand.sh { args ->
92 98
       assert args.returnStdout
93
-      assert args.script == "docker build --pull --label 'foo=a' --label 'bar=b' --file '.pipeline/Dockerfile' ."
99
+      assert args.script ==~ (/^docker build --pull --label 'foo=a' --label 'bar=b' / +
100
+                            /--file '\.pipeline\/Dockerfile\.[a-z0-9]+' \.$/)
94 101
 
95 102
       // Mock `docker build` output to test that we correctly parse the image ID
96 103
       return "Removing intermediate container foo\n" +

+ 44
- 0
test/org/wikimedia/integration/PipelineStageTest.groovy View File

@@ -0,0 +1,44 @@
1
+import groovy.mock.interceptor.MockFor
2
+import static groovy.test.GroovyAssert.*
3
+import groovy.util.GroovyTestCase
4
+
5
+import org.wikimedia.integration.PipelineStage
6
+import org.wikimedia.integration.ExecutionGraph
7
+import org.wikimedia.integration.ExecutionContext
8
+
9
class PipelineStageTest extends GroovyTestCase {
  void testPipelineStage_defaultConfig() {
    def expectedRun = [
      image: '${.imageID}',
      arguments: [],
    ]

    // name-only shorthand expands to: build and run the same-named variant
    assert PipelineStage.defaultConfig([name: "foo"]) == [
      name: "foo",
      build: '${.stage}',
      run: expectedRun,
    ]

    // `run: true` expands to running the image built by this stage
    assert PipelineStage.defaultConfig([name: "foo", build: "foo", run: true]) == [
      name: "foo",
      build: "foo",
      run: expectedRun,
    ]

    def publishedImage = PipelineStage.defaultConfig([publish: [image: true]]).publish.image

    // publish.image.id defaults to the previously built image
    assert publishedImage.id == '${.imageID}'

    // publish.image.name defaults to the project name
    assert publishedImage.name == '${setup.project}'

    // publish.image.tag defaults to {timestamp}-{stage name}
    assert publishedImage.tag == '${setup.timestamp}-${.stage}'
  }
}

+ 93
- 0
test/org/wikimedia/integration/PipelineTest.groovy View File

@@ -0,0 +1,93 @@
1
+import groovy.mock.interceptor.MockFor
2
+import static groovy.test.GroovyAssert.*
3
+import groovy.util.GroovyTestCase
4
+
5
+import org.wikimedia.integration.Pipeline
6
+
7
class PipelineTest extends GroovyTestCase {
  void testConstructor() {
    def p = new Pipeline("foo", [
      blubberfile: "bar/blubber.yaml",
      directory: "src/foo",
      stages: [
        [name: "unit"],
        [name: "lint"],
        [name: "candidate"],
        [name: "production"],
      ],
      execution: [
        ["unit", "candidate", "production"],
        ["lint", "candidate", "production"],
      ],
    ])

    // explicitly configured values are taken as given
    assert p.blubberfile == "bar/blubber.yaml"
    assert p.directory == "src/foo"
    assert p.execution == [
      ["unit", "candidate", "production"],
      ["lint", "candidate", "production"],
    ]
  }

  void testConstructor_defaults() {
    def p = new Pipeline("foo", [
      directory: "src/foo",
      stages: [
        [name: "unit"],
        [name: "lint"],
        [name: "candidate"],
        [name: "production"],
      ],
    ])

    // blubberfile defaults to {pipeline name}/blubber.yaml
    assert p.blubberfile == "foo/blubber.yaml"

    // execution defaults to a single serial arc through all stages
    assert p.execution == [
      ["unit", "lint", "candidate", "production"],
    ]
  }

  void testRunner() {
    def p = new Pipeline("foo", [
      directory: "src/foo/",
      stages: [],
    ])

    // the runner's config path is made relative to the project directory
    assert p.runner().configPath == "../../.pipeline"
  }

  void testRunner_currentDirectory() {
    def p = new Pipeline("foo", [
      directory: ".",
      stages: [],
    ])

    assert p.runner().configPath == ".pipeline"
  }

  void testValidate_setupReserved() {
    assertReservedStageName("setup")
  }

  void testValidate_teardownReserved() {
    assertReservedStageName("teardown")
  }

  /** Asserts that validate() rejects the given reserved stage name. */
  private void assertReservedStageName(String stageName) {
    def p = new Pipeline("foo", [
      stages: [[name: stageName]],
    ])

    def e = shouldFail(Pipeline.ValidationException) {
      p.validate()
    }

    assert e.errors.size() == 1
    assert e.errors[0] == stageName + " is a reserved stage name"
  }
}

+ 4
- 0
test/org/wikimedia/integration/UtilityTest.groovy View File

@@ -7,6 +7,10 @@ class UtilityTestCase extends GroovyTestCase {
7 7
     assert arg("foo bar'\n baz") == """'foo bar'\\''\n baz'"""
8 8
   }
9 9
 
10
+  void testArgs() {
11
+    assert args(["foo bar'\n baz", "qux"]) == """'foo bar'\\''\n baz' 'qux'"""
12
+  }
13
+
10 14
   void testRandomAlphanum() {
11 15
     def expectedChars = ('a'..'z') + ('0'..'9')
12 16
     def alphanum = randomAlphanum(12)

Loading…
Cancel
Save