| OLD | NEW |
| 1 // Copyright 2016 The Chromium Authors. All rights reserved. | 1 // Copyright 2016 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 package main | 5 package main |
| 6 | 6 |
| 7 /* | 7 /* |
| 8 Generate the tasks.json file. | 8 Generate the tasks.json file. |
| 9 */ | 9 */ |
| 10 | 10 |
| 11 import ( | 11 import ( |
| 12 "bytes" | |
| 13 "encoding/json" | 12 "encoding/json" |
| 14 "flag" | |
| 15 "fmt" | 13 "fmt" |
| 16 "io/ioutil" | |
| 17 "os" | 14 "os" |
| 18 "path" | 15 "path" |
| 19 "path/filepath" | 16 "path/filepath" |
| 20 "sort" | 17 "sort" |
| 21 "strings" | 18 "strings" |
| 22 | 19 |
| 23 "github.com/skia-dev/glog" | 20 "github.com/skia-dev/glog" |
| 24 "go.skia.org/infra/go/common" | |
| 25 "go.skia.org/infra/go/util" | 21 "go.skia.org/infra/go/util" |
| 26 "go.skia.org/infra/task_scheduler/go/specs" | 22 "go.skia.org/infra/task_scheduler/go/specs" |
| 27 ) | 23 ) |
| 28 | 24 |
| 29 const ( | 25 const ( |
| 30 DEFAULT_OS = "Ubuntu" | 26 DEFAULT_OS = "Ubuntu" |
| 31 | 27 |
| 32 // Pool for Skia bots. | 28 // Pool for Skia bots. |
| 33 POOL_SKIA = "Skia" | 29 POOL_SKIA = "Skia" |
| 34 | 30 |
| (...skipping 16 matching lines...) |
| 51 // UPLOAD_DIMENSIONS are the Swarming dimensions for upload tasks. | 47 // UPLOAD_DIMENSIONS are the Swarming dimensions for upload tasks. |
| 52 UPLOAD_DIMENSIONS = []string{ | 48 UPLOAD_DIMENSIONS = []string{ |
| 53 "cpu:x86-64-avx2", | 49 "cpu:x86-64-avx2", |
| 54 "gpu:none", | 50 "gpu:none", |
| 55 "os:Ubuntu", | 51 "os:Ubuntu", |
| 56 fmt.Sprintf("pool:%s", POOL_SKIA), | 52 fmt.Sprintf("pool:%s", POOL_SKIA), |
| 57 } | 53 } |
| 58 | 54 |
| 59 // Defines the structure of job names. | 55 // Defines the structure of job names. |
| 60 jobNameSchema *JobNameSchema | 56 jobNameSchema *JobNameSchema |
| 61 | |
| 62 // Caches CIPD package info so that we don't have to re-read VERSION | |
| 63 // files. | |
| 64 cipdPackages = map[string]*specs.CipdPackage{} | |
| 65 | |
| 66 // Path to the infra/bots directory. | |
| 67 infrabotsDir = "" | |
| 68 | |
| 69 // Flags. | |
| 70 	testing = flag.Bool("test", false, "Run in test mode: verify that the output hasn't changed.") | |
| 71 ) | 57 ) |
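
Note on the removed globals: the cipdPackages cache, infrabotsDir, and the -test flag are not simply dropped; their responsibilities move behind the specs.TasksCfgBuilder introduced later in this CL. A minimal sketch of the new flow, using only the builder methods that actually appear in this diff (anything about their internals is an assumption):

    // Sketch only; assumes the builder internally handles checkout discovery,
    // CIPD VERSION-file reads and caching, validation, and the tasks.json
    // write/compare that the removed globals and the -test flag used to support.
    b := specs.MustNewTasksCfgBuilder()             // was: common.Init() + getCheckoutRoot()
    pkg := b.MustGetCipdPackageFromAsset("skimage") // was: getCipdPackage("skimage") + cipdPackages cache
    _ = pkg
    b.MustAddTask("Build-Ubuntu-GCC-x86_64-Release-Shared", &specs.TaskSpec{
            Isolate:  "compile_skia.isolate",
            Priority: 0.8,
    })
    b.MustAddJob("Build-Ubuntu-GCC-x86_64-Release-Shared", &specs.JobSpec{
            Priority:  0.8,
            TaskSpecs: []string{"Build-Ubuntu-GCC-x86_64-Release-Shared"},
    })
    b.MustFinish()                                  // was: cfg.Validate() + JSON marshal + write or -test compare
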
| 72 | 58 |
| 73 // deriveCompileTaskName returns the name of a compile task based on the given | 59 // deriveCompileTaskName returns the name of a compile task based on the given |
| 74 // job name. | 60 // job name. |
| 75 func deriveCompileTaskName(jobName string, parts map[string]string) string { | 61 func deriveCompileTaskName(jobName string, parts map[string]string) string { |
| 76 if parts["role"] == "Housekeeper" { | 62 if parts["role"] == "Housekeeper" { |
| 77 return "Build-Ubuntu-GCC-x86_64-Release-Shared" | 63 return "Build-Ubuntu-GCC-x86_64-Release-Shared" |
| 78 } else if parts["role"] == "Test" || parts["role"] == "Perf" { | 64 } else if parts["role"] == "Test" || parts["role"] == "Perf" { |
| 79 task_os := parts["os"] | 65 task_os := parts["os"] |
| 80 ec := parts["extra_config"] | 66 ec := parts["extra_config"] |
| (...skipping 102 matching lines...) |
| 183 d["gpu"] = "none" | 169 d["gpu"] = "none" |
| 184 } | 170 } |
| 185 rv := make([]string, 0, len(d)) | 171 rv := make([]string, 0, len(d)) |
| 186 for k, v := range d { | 172 for k, v := range d { |
| 187 rv = append(rv, fmt.Sprintf("%s:%s", k, v)) | 173 rv = append(rv, fmt.Sprintf("%s:%s", k, v)) |
| 188 } | 174 } |
| 189 sort.Strings(rv) | 175 sort.Strings(rv) |
| 190 return rv | 176 return rv |
| 191 } | 177 } |
| 192 | 178 |
| 193 // getCipdPackage finds and returns the given CIPD package and version. | |
| 194 func getCipdPackage(assetName string) *specs.CipdPackage { | |
| 195 if pkg, ok := cipdPackages[assetName]; ok { | |
| 196 return pkg | |
| 197 } | |
| 198 versionFile := path.Join(infrabotsDir, "assets", assetName, "VERSION") | |
| 199 contents, err := ioutil.ReadFile(versionFile) | |
| 200 if err != nil { | |
| 201 glog.Fatal(err) | |
| 202 } | |
| 203 version := strings.TrimSpace(string(contents)) | |
| 204 pkg := &specs.CipdPackage{ | |
| 205 Name: fmt.Sprintf("skia/bots/%s", assetName), | |
| 206 Path: assetName, | |
| 207 Version: fmt.Sprintf("version:%s", version), | |
| 208 } | |
| 209 if assetName == "win_toolchain" { | |
| 210 pkg.Path = "t" // Workaround for path length limit on Windows. | |
| 211 } | |
| 212 cipdPackages[assetName] = pkg | |
| 213 return pkg | |
| 214 } | |
| 215 | |
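
The deleted getCipdPackage helper above (VERSION-file read, "version:" prefix, in-process caching, and the win_toolchain "t" path workaround) is replaced by the builder's MustGetCipdPackageFromAsset. A hypothetical usage sketch, under the assumption (not visible in this diff) that the builder method keeps the same semantics:

    // Assumed-equivalent usage; the field values mirror what the old helper
    // derived from infra/bots/assets/<asset>/VERSION.
    pkg := b.MustGetCipdPackageFromAsset("clang_linux")
    // pkg.Name    == "skia/bots/clang_linux"
    // pkg.Path    == "clang_linux" (the old helper shortened win_toolchain's path to "t")
    // pkg.Version == "version:" + strings.TrimSpace(<VERSION file contents>)
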
| 216 // compile generates a compile task. Returns the name of the last task in the | 179 // compile generates a compile task. Returns the name of the last task in the |
| 217 // generated chain of tasks, which the Job should add as a dependency. | 180 // generated chain of tasks, which the Job should add as a dependency. |
| 218 func compile(cfg *specs.TasksCfg, name string, parts map[string]string) string { | 181 func compile(b *specs.TasksCfgBuilder, name string, parts map[string]string) string { |
| 219 // Collect the necessary CIPD packages. | 182 // Collect the necessary CIPD packages. |
| 220 pkgs := []*specs.CipdPackage{} | 183 pkgs := []*specs.CipdPackage{} |
| 221 | 184 |
| 222 // Android bots require a toolchain. | 185 // Android bots require a toolchain. |
| 223 if strings.Contains(name, "Android") { | 186 if strings.Contains(name, "Android") { |
| 224 » » pkgs = append(pkgs, getCipdPackage("android_sdk")) | 187 » » pkgs = append(pkgs, b.MustGetCipdPackageFromAsset("android_sdk")) |
| 225 if strings.Contains(name, "Mac") { | 188 if strings.Contains(name, "Mac") { |
| 226 » » » pkgs = append(pkgs, getCipdPackage("android_ndk_darwin")) | 189 » » » pkgs = append(pkgs, b.MustGetCipdPackageFromAsset("android_ndk_darwin")) |
| 227 } else { | 190 } else { |
| 228 » » » pkgs = append(pkgs, getCipdPackage("android_ndk_linux")) | 191 » » » pkgs = append(pkgs, b.MustGetCipdPackageFromAsset("android_ndk_linux")) |
| 229 } | 192 } |
| 230 } | 193 } |
| 231 | 194 |
| 232 // Clang on Linux. | 195 // Clang on Linux. |
| 233 if strings.Contains(name, "Ubuntu") && strings.Contains(name, "Clang") { | 196 if strings.Contains(name, "Ubuntu") && strings.Contains(name, "Clang") { |
| 234 » » pkgs = append(pkgs, getCipdPackage("clang_linux")) | 197 » » pkgs = append(pkgs, b.MustGetCipdPackageFromAsset("clang_linux")) |
| 235 } | 198 } |
| 236 | 199 |
| 237 // Windows toolchain. | 200 // Windows toolchain. |
| 238 if strings.Contains(name, "Win") { | 201 if strings.Contains(name, "Win") { |
| 239 » » pkgs = append(pkgs, getCipdPackage("win_toolchain")) | 202 » » pkgs = append(pkgs, b.MustGetCipdPackageFromAsset("win_toolchain")) |
| 240 if strings.Contains(name, "Vulkan") { | 203 if strings.Contains(name, "Vulkan") { |
| 241 » » » pkgs = append(pkgs, getCipdPackage("win_vulkan_sdk")) | 204 » » » pkgs = append(pkgs, b.MustGetCipdPackageFromAsset("win_vulkan_sdk")) |
| 242 } | 205 } |
| 243 } | 206 } |
| 244 | 207 |
| 245 // Add the task. | 208 // Add the task. |
| 246 » cfg.Tasks[name] = &specs.TaskSpec{ | 209 » b.MustAddTask(name, &specs.TaskSpec{ |
| 247 CipdPackages: pkgs, | 210 CipdPackages: pkgs, |
| 248 Dimensions: swarmDimensions(parts), | 211 Dimensions: swarmDimensions(parts), |
| 249 ExtraArgs: []string{ | 212 ExtraArgs: []string{ |
| 250 "--workdir", "../../..", "swarm_compile", | 213 "--workdir", "../../..", "swarm_compile", |
| 251 "repository=skia", | 214 "repository=skia", |
| 252 fmt.Sprintf("buildername=%s", name), | 215 fmt.Sprintf("buildername=%s", name), |
| 253 "mastername=fake-master", | 216 "mastername=fake-master", |
| 254 "buildnumber=2", | 217 "buildnumber=2", |
| 255 "slavename=fake-buildslave", | 218 "slavename=fake-buildslave", |
| 256 "nobuildbot=True", | 219 "nobuildbot=True", |
| 257                 fmt.Sprintf("swarm_out_dir=%s", specs.PLACEHOLDER_ISOLATED_OUTDIR), | 220                 fmt.Sprintf("swarm_out_dir=%s", specs.PLACEHOLDER_ISOLATED_OUTDIR), |
| 258 fmt.Sprintf("revision=%s", specs.PLACEHOLDER_REVISION), | 221 fmt.Sprintf("revision=%s", specs.PLACEHOLDER_REVISION), |
| 259                 fmt.Sprintf("patch_storage=%s", specs.PLACEHOLDER_PATCH_STORAGE), | 222                 fmt.Sprintf("patch_storage=%s", specs.PLACEHOLDER_PATCH_STORAGE), |
| 260                 fmt.Sprintf("rietveld=%s", specs.PLACEHOLDER_CODEREVIEW_SERVER), | 223                 fmt.Sprintf("rietveld=%s", specs.PLACEHOLDER_CODEREVIEW_SERVER), |
| 261 fmt.Sprintf("issue=%s", specs.PLACEHOLDER_ISSUE), | 224 fmt.Sprintf("issue=%s", specs.PLACEHOLDER_ISSUE), |
| 262 fmt.Sprintf("patchset=%s", specs.PLACEHOLDER_PATCHSET), | 225 fmt.Sprintf("patchset=%s", specs.PLACEHOLDER_PATCHSET), |
| 263 }, | 226 }, |
| 264 Isolate: "compile_skia.isolate", | 227 Isolate: "compile_skia.isolate", |
| 265 Priority: 0.8, | 228 Priority: 0.8, |
| 266 » } | 229 » }) |
| 267 return name | 230 return name |
| 268 } | 231 } |
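
As elsewhere in this file, the name returned by compile() is consumed in two places: Test/Perf tasks list it under Dependencies, and process() records it (via deps) in the Job's TaskSpecs. A condensed sketch of that chaining, with "Test-Example" as a placeholder name rather than a real job:

    // compile() registers the build task and hands back its name ...
    compileTaskName := compile(b, "Build-Ubuntu-GCC-x86_64-Release-Shared", parts)
    // ... which downstream tasks depend on ...
    b.MustAddTask("Test-Example", &specs.TaskSpec{
            Dependencies: []string{compileTaskName},
            Dimensions:   swarmDimensions(parts),
            Isolate:      "test_skia.isolate",
            Priority:     0.8,
    })
    // ... and which the Job ultimately reaches through its TaskSpecs chain.
    b.MustAddJob("Test-Example", &specs.JobSpec{
            Priority:  0.8,
            TaskSpecs: []string{"Test-Example"},
    })
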
| 269 | 232 |
| 270 // recreateSKPs generates a RecreateSKPs task. Returns the name of the last | 233 // recreateSKPs generates a RecreateSKPs task. Returns the name of the last |
| 271 // task in the generated chain of tasks, which the Job should add as a | 234 // task in the generated chain of tasks, which the Job should add as a |
| 272 // dependency. | 235 // dependency. |
| 273 func recreateSKPs(cfg *specs.TasksCfg, name string) string { | 236 func recreateSKPs(b *specs.TasksCfgBuilder, name string) string { |
| 274 // TODO | 237 // TODO |
| 275 return name | 238 return name |
| 276 } | 239 } |
| 277 | 240 |
| 278 // ctSKPs generates a CT SKPs task. Returns the name of the last task in the | 241 // ctSKPs generates a CT SKPs task. Returns the name of the last task in the |
| 279 // generated chain of tasks, which the Job should add as a dependency. | 242 // generated chain of tasks, which the Job should add as a dependency. |
| 280 func ctSKPs(cfg *specs.TasksCfg, name string) string { | 243 func ctSKPs(b *specs.TasksCfgBuilder, name string) string { |
| 281 // TODO | 244 // TODO |
| 282 return name | 245 return name |
| 283 } | 246 } |
| 284 | 247 |
| 285 // housekeeper generates a Housekeeper task. Returns the name of the last task | 248 // housekeeper generates a Housekeeper task. Returns the name of the last task |
| 286 // in the generated chain of tasks, which the Job should add as a dependency. | 249 // in the generated chain of tasks, which the Job should add as a dependency. |
| 287 func housekeeper(cfg *specs.TasksCfg, name, compileTaskName string) string { | 250 func housekeeper(b *specs.TasksCfgBuilder, name, compileTaskName string) string { |
| 288 // TODO | 251 // TODO |
| 289 return name | 252 return name |
| 290 } | 253 } |
| 291 | 254 |
| 292 // infra generates an infra_tests task. Returns the name of the last task in the | 255 // infra generates an infra_tests task. Returns the name of the last task in the |
| 293 // generated chain of tasks, which the Job should add as a dependency. | 256 // generated chain of tasks, which the Job should add as a dependency. |
| 294 func infra(cfg *specs.TasksCfg, name string) string { | 257 func infra(b *specs.TasksCfgBuilder, name string) string { |
| 295 » cfg.Tasks[name] = &specs.TaskSpec{ | 258 » b.MustAddTask(name, &specs.TaskSpec{ |
| 296 CipdPackages: []*specs.CipdPackage{}, | 259 CipdPackages: []*specs.CipdPackage{}, |
| 297 Dimensions: UPLOAD_DIMENSIONS, | 260 Dimensions: UPLOAD_DIMENSIONS, |
| 298 ExtraArgs: []string{ | 261 ExtraArgs: []string{ |
| 299 "--workdir", "../../..", "swarm_infra", | 262 "--workdir", "../../..", "swarm_infra", |
| 300 "repository=skia", | 263 "repository=skia", |
| 301 fmt.Sprintf("buildername=%s", name), | 264 fmt.Sprintf("buildername=%s", name), |
| 302 "mastername=fake-master", | 265 "mastername=fake-master", |
| 303 "buildnumber=2", | 266 "buildnumber=2", |
| 304 "slavename=fake-buildslave", | 267 "slavename=fake-buildslave", |
| 305 "nobuildbot=True", | 268 "nobuildbot=True", |
| 306                 fmt.Sprintf("swarm_out_dir=%s", specs.PLACEHOLDER_ISOLATED_OUTDIR), | 269                 fmt.Sprintf("swarm_out_dir=%s", specs.PLACEHOLDER_ISOLATED_OUTDIR), |
| 307 fmt.Sprintf("revision=%s", specs.PLACEHOLDER_REVISION), | 270 fmt.Sprintf("revision=%s", specs.PLACEHOLDER_REVISION), |
| 308                 fmt.Sprintf("patch_storage=%s", specs.PLACEHOLDER_PATCH_STORAGE), | 271                 fmt.Sprintf("patch_storage=%s", specs.PLACEHOLDER_PATCH_STORAGE), |
| 309                 fmt.Sprintf("rietveld=%s", specs.PLACEHOLDER_CODEREVIEW_SERVER), | 272                 fmt.Sprintf("rietveld=%s", specs.PLACEHOLDER_CODEREVIEW_SERVER), |
| 310 fmt.Sprintf("issue=%s", specs.PLACEHOLDER_ISSUE), | 273 fmt.Sprintf("issue=%s", specs.PLACEHOLDER_ISSUE), |
| 311 fmt.Sprintf("patchset=%s", specs.PLACEHOLDER_PATCHSET), | 274 fmt.Sprintf("patchset=%s", specs.PLACEHOLDER_PATCHSET), |
| 312 }, | 275 }, |
| 313 Isolate: "infra_skia.isolate", | 276 Isolate: "infra_skia.isolate", |
| 314 Priority: 0.8, | 277 Priority: 0.8, |
| 315 » } | 278 » }) |
| 316 return name | 279 return name |
| 317 } | 280 } |
| 318 | 281 |
| 319 // doUpload indicates whether the given Job should upload its results. | 282 // doUpload indicates whether the given Job should upload its results. |
| 320 func doUpload(name string) bool { | 283 func doUpload(name string) bool { |
| 321 skipUploadBots := []string{ | 284 skipUploadBots := []string{ |
| 322 "ASAN", | 285 "ASAN", |
| 323 "Coverage", | 286 "Coverage", |
| 324 "MSAN", | 287 "MSAN", |
| 325 "TSAN", | 288 "TSAN", |
| 326 "UBSAN", | 289 "UBSAN", |
| 327 "Valgrind", | 290 "Valgrind", |
| 328 } | 291 } |
| 329 for _, s := range skipUploadBots { | 292 for _, s := range skipUploadBots { |
| 330 if strings.Contains(name, s) { | 293 if strings.Contains(name, s) { |
| 331 return false | 294 return false |
| 332 } | 295 } |
| 333 } | 296 } |
| 334 return true | 297 return true |
| 335 } | 298 } |
| 336 | 299 |
| 337 // test generates a Test task. Returns the name of the last task in the | 300 // test generates a Test task. Returns the name of the last task in the |
| 338 // generated chain of tasks, which the Job should add as a dependency. | 301 // generated chain of tasks, which the Job should add as a dependency. |
| 339 func test(cfg *specs.TasksCfg, name string, parts map[string]string, compileTaskName string, pkgs []*specs.CipdPackage) string { | 302 func test(b *specs.TasksCfgBuilder, name string, parts map[string]string, compileTaskName string, pkgs []*specs.CipdPackage) string { |
| 340 » cfg.Tasks[name] = &specs.TaskSpec{ | 303 » b.MustAddTask(name, &specs.TaskSpec{ |
| 341 CipdPackages: pkgs, | 304 CipdPackages: pkgs, |
| 342 Dependencies: []string{compileTaskName}, | 305 Dependencies: []string{compileTaskName}, |
| 343 Dimensions: swarmDimensions(parts), | 306 Dimensions: swarmDimensions(parts), |
| 344 ExtraArgs: []string{ | 307 ExtraArgs: []string{ |
| 345 "--workdir", "../../..", "swarm_test", | 308 "--workdir", "../../..", "swarm_test", |
| 346 "repository=skia", | 309 "repository=skia", |
| 347 fmt.Sprintf("buildername=%s", name), | 310 fmt.Sprintf("buildername=%s", name), |
| 348 "mastername=fake-master", | 311 "mastername=fake-master", |
| 349 "buildnumber=2", | 312 "buildnumber=2", |
| 350 "slavename=fake-buildslave", | 313 "slavename=fake-buildslave", |
| 351 "nobuildbot=True", | 314 "nobuildbot=True", |
| 352                 fmt.Sprintf("swarm_out_dir=%s", specs.PLACEHOLDER_ISOLATED_OUTDIR), | 315                 fmt.Sprintf("swarm_out_dir=%s", specs.PLACEHOLDER_ISOLATED_OUTDIR), |
| 353 fmt.Sprintf("revision=%s", specs.PLACEHOLDER_REVISION), | 316 fmt.Sprintf("revision=%s", specs.PLACEHOLDER_REVISION), |
| 354                 fmt.Sprintf("patch_storage=%s", specs.PLACEHOLDER_PATCH_STORAGE), | 317                 fmt.Sprintf("patch_storage=%s", specs.PLACEHOLDER_PATCH_STORAGE), |
| 355                 fmt.Sprintf("rietveld=%s", specs.PLACEHOLDER_CODEREVIEW_SERVER), | 318                 fmt.Sprintf("rietveld=%s", specs.PLACEHOLDER_CODEREVIEW_SERVER), |
| 356 fmt.Sprintf("issue=%s", specs.PLACEHOLDER_ISSUE), | 319 fmt.Sprintf("issue=%s", specs.PLACEHOLDER_ISSUE), |
| 357 fmt.Sprintf("patchset=%s", specs.PLACEHOLDER_PATCHSET), | 320 fmt.Sprintf("patchset=%s", specs.PLACEHOLDER_PATCHSET), |
| 358 }, | 321 }, |
| 359 Isolate: "test_skia.isolate", | 322 Isolate: "test_skia.isolate", |
| 360 Priority: 0.8, | 323 Priority: 0.8, |
| 361 » } | 324 » }) |
| 362 // Upload results if necessary. | 325 // Upload results if necessary. |
| 363 if doUpload(name) { | 326 if doUpload(name) { |
| 364                 uploadName := fmt.Sprintf("%s%s%s", PREFIX_UPLOAD, jobNameSchema.Sep, name) | 327                 uploadName := fmt.Sprintf("%s%s%s", PREFIX_UPLOAD, jobNameSchema.Sep, name) |
| 365 » » cfg.Tasks[uploadName] = &specs.TaskSpec{ | 328 » » b.MustAddTask(uploadName, &specs.TaskSpec{ |
| 366 Dependencies: []string{name}, | 329 Dependencies: []string{name}, |
| 367 Dimensions: UPLOAD_DIMENSIONS, | 330 Dimensions: UPLOAD_DIMENSIONS, |
| 368 ExtraArgs: []string{ | 331 ExtraArgs: []string{ |
| 369 "--workdir", "../../..", "upload_dm_results", | 332 "--workdir", "../../..", "upload_dm_results", |
| 370 "repository=skia", | 333 "repository=skia", |
| 371 fmt.Sprintf("buildername=%s", name), | 334 fmt.Sprintf("buildername=%s", name), |
| 372 "mastername=fake-master", | 335 "mastername=fake-master", |
| 373 "buildnumber=2", | 336 "buildnumber=2", |
| 374 "slavename=fake-buildslave", | 337 "slavename=fake-buildslave", |
| 375 "nobuildbot=True", | 338 "nobuildbot=True", |
| 376                         fmt.Sprintf("swarm_out_dir=%s", specs.PLACEHOLDER_ISOLATED_OUTDIR), | 339                         fmt.Sprintf("swarm_out_dir=%s", specs.PLACEHOLDER_ISOLATED_OUTDIR), |
| 377                         fmt.Sprintf("revision=%s", specs.PLACEHOLDER_REVISION), | 340                         fmt.Sprintf("revision=%s", specs.PLACEHOLDER_REVISION), |
| 378                         fmt.Sprintf("patch_storage=%s", specs.PLACEHOLDER_PATCH_STORAGE), | 341                         fmt.Sprintf("patch_storage=%s", specs.PLACEHOLDER_PATCH_STORAGE), |
| 379                         fmt.Sprintf("rietveld=%s", specs.PLACEHOLDER_CODEREVIEW_SERVER), | 342                         fmt.Sprintf("rietveld=%s", specs.PLACEHOLDER_CODEREVIEW_SERVER), |
| 380                         fmt.Sprintf("issue=%s", specs.PLACEHOLDER_ISSUE), | 343                         fmt.Sprintf("issue=%s", specs.PLACEHOLDER_ISSUE), |
| 381                         fmt.Sprintf("patchset=%s", specs.PLACEHOLDER_PATCHSET), | 344                         fmt.Sprintf("patchset=%s", specs.PLACEHOLDER_PATCHSET), |
| 382 }, | 345 }, |
| 383 Isolate: "upload_dm_results.isolate", | 346 Isolate: "upload_dm_results.isolate", |
| 384 Priority: 0.8, | 347 Priority: 0.8, |
| 385 » » } | 348 » » }) |
| 386 return uploadName | 349 return uploadName |
| 387 } | 350 } |
| 388 return name | 351 return name |
| 389 } | 352 } |
| 390 | 353 |
| 391 // perf generates a Perf task. Returns the name of the last task in the | 354 // perf generates a Perf task. Returns the name of the last task in the |
| 392 // generated chain of tasks, which the Job should add as a dependency. | 355 // generated chain of tasks, which the Job should add as a dependency. |
| 393 func perf(cfg *specs.TasksCfg, name string, parts map[string]string, compileTaskName string, pkgs []*specs.CipdPackage) string { | 356 func perf(b *specs.TasksCfgBuilder, name string, parts map[string]string, compileTaskName string, pkgs []*specs.CipdPackage) string { |
| 394 » cfg.Tasks[name] = &specs.TaskSpec{ | 357 » b.MustAddTask(name, &specs.TaskSpec{ |
| 395 CipdPackages: pkgs, | 358 CipdPackages: pkgs, |
| 396 Dependencies: []string{compileTaskName}, | 359 Dependencies: []string{compileTaskName}, |
| 397 Dimensions: swarmDimensions(parts), | 360 Dimensions: swarmDimensions(parts), |
| 398 ExtraArgs: []string{ | 361 ExtraArgs: []string{ |
| 399 "--workdir", "../../..", "swarm_perf", | 362 "--workdir", "../../..", "swarm_perf", |
| 400 "repository=skia", | 363 "repository=skia", |
| 401 fmt.Sprintf("buildername=%s", name), | 364 fmt.Sprintf("buildername=%s", name), |
| 402 "mastername=fake-master", | 365 "mastername=fake-master", |
| 403 "buildnumber=2", | 366 "buildnumber=2", |
| 404 "slavename=fake-buildslave", | 367 "slavename=fake-buildslave", |
| 405 "nobuildbot=True", | 368 "nobuildbot=True", |
| 406                 fmt.Sprintf("swarm_out_dir=%s", specs.PLACEHOLDER_ISOLATED_OUTDIR), | 369                 fmt.Sprintf("swarm_out_dir=%s", specs.PLACEHOLDER_ISOLATED_OUTDIR), |
| 407 fmt.Sprintf("revision=%s", specs.PLACEHOLDER_REVISION), | 370 fmt.Sprintf("revision=%s", specs.PLACEHOLDER_REVISION), |
| 408 fmt.Sprintf("patch_storage=%s", specs.PLACEHOLDER_PATCH_
STORAGE), | 371 fmt.Sprintf("patch_storage=%s", specs.PLACEHOLDER_PATCH_
STORAGE), |
| 409 fmt.Sprintf("rietveld=%s", specs.PLACEHOLDER_CODEREVIEW_
SERVER), | 372 fmt.Sprintf("rietveld=%s", specs.PLACEHOLDER_CODEREVIEW_
SERVER), |
| 410 fmt.Sprintf("issue=%s", specs.PLACEHOLDER_ISSUE), | 373 fmt.Sprintf("issue=%s", specs.PLACEHOLDER_ISSUE), |
| 411 fmt.Sprintf("patchset=%s", specs.PLACEHOLDER_PATCHSET), | 374 fmt.Sprintf("patchset=%s", specs.PLACEHOLDER_PATCHSET), |
| 412 }, | 375 }, |
| 413 Isolate: "perf_skia.isolate", | 376 Isolate: "perf_skia.isolate", |
| 414 Priority: 0.8, | 377 Priority: 0.8, |
| 415 » } | 378 » }) |
| 416 // Upload results if necessary. | 379 // Upload results if necessary. |
| 417 if strings.Contains(name, "Release") && doUpload(name) { | 380 if strings.Contains(name, "Release") && doUpload(name) { |
| 418                 uploadName := fmt.Sprintf("%s%s%s", PREFIX_UPLOAD, jobNameSchema.Sep, name) | 381                 uploadName := fmt.Sprintf("%s%s%s", PREFIX_UPLOAD, jobNameSchema.Sep, name) |
| 419 » » cfg.Tasks[uploadName] = &specs.TaskSpec{ | 382 » » b.MustAddTask(uploadName, &specs.TaskSpec{ |
| 420 Dependencies: []string{name}, | 383 Dependencies: []string{name}, |
| 421 Dimensions: UPLOAD_DIMENSIONS, | 384 Dimensions: UPLOAD_DIMENSIONS, |
| 422 ExtraArgs: []string{ | 385 ExtraArgs: []string{ |
| 423 "--workdir", "../../..", "upload_nano_results", | 386 "--workdir", "../../..", "upload_nano_results", |
| 424 "repository=skia", | 387 "repository=skia", |
| 425 fmt.Sprintf("buildername=%s", name), | 388 fmt.Sprintf("buildername=%s", name), |
| 426 "mastername=fake-master", | 389 "mastername=fake-master", |
| 427 "buildnumber=2", | 390 "buildnumber=2", |
| 428 "slavename=fake-buildslave", | 391 "slavename=fake-buildslave", |
| 429 "nobuildbot=True", | 392 "nobuildbot=True", |
| 430                         fmt.Sprintf("swarm_out_dir=%s", specs.PLACEHOLDER_ISOLATED_OUTDIR), | 393                         fmt.Sprintf("swarm_out_dir=%s", specs.PLACEHOLDER_ISOLATED_OUTDIR), |
| 431                         fmt.Sprintf("revision=%s", specs.PLACEHOLDER_REVISION), | 394                         fmt.Sprintf("revision=%s", specs.PLACEHOLDER_REVISION), |
| 432                         fmt.Sprintf("patch_storage=%s", specs.PLACEHOLDER_PATCH_STORAGE), | 395                         fmt.Sprintf("patch_storage=%s", specs.PLACEHOLDER_PATCH_STORAGE), |
| 433                         fmt.Sprintf("rietveld=%s", specs.PLACEHOLDER_CODEREVIEW_SERVER), | 396                         fmt.Sprintf("rietveld=%s", specs.PLACEHOLDER_CODEREVIEW_SERVER), |
| 434                         fmt.Sprintf("issue=%s", specs.PLACEHOLDER_ISSUE), | 397                         fmt.Sprintf("issue=%s", specs.PLACEHOLDER_ISSUE), |
| 435                         fmt.Sprintf("patchset=%s", specs.PLACEHOLDER_PATCHSET), | 398                         fmt.Sprintf("patchset=%s", specs.PLACEHOLDER_PATCHSET), |
| 436 }, | 399 }, |
| 437 Isolate: "upload_nano_results.isolate", | 400 Isolate: "upload_nano_results.isolate", |
| 438 Priority: 0.8, | 401 Priority: 0.8, |
| 439 » » } | 402 » » }) |
| 440 return uploadName | 403 return uploadName |
| 441 } | 404 } |
| 442 return name | 405 return name |
| 443 } | 406 } |
| 444 | 407 |
| 445 // process generates tasks and jobs for the given job name. | 408 // process generates tasks and jobs for the given job name. |
| 446 func process(cfg *specs.TasksCfg, name string) { | 409 func process(b *specs.TasksCfgBuilder, name string) { |
| 447 » if _, ok := cfg.Jobs[name]; ok { | |
| 448 » » glog.Fatalf("Duplicate job %q", name) | |
| 449 » } | |
| 450 deps := []string{} | 410 deps := []string{} |
| 451 | 411 |
| 452 parts, err := jobNameSchema.ParseJobName(name) | 412 parts, err := jobNameSchema.ParseJobName(name) |
| 453 if err != nil { | 413 if err != nil { |
| 454 glog.Fatal(err) | 414 glog.Fatal(err) |
| 455 } | 415 } |
| 456 | 416 |
| 457 // RecreateSKPs. | 417 // RecreateSKPs. |
| 458 if strings.Contains(name, "RecreateSKPs") { | 418 if strings.Contains(name, "RecreateSKPs") { |
| 459 » » deps = append(deps, recreateSKPs(cfg, name)) | 419 » » deps = append(deps, recreateSKPs(b, name)) |
| 460 } | 420 } |
| 461 | 421 |
| 462 // CT bots. | 422 // CT bots. |
| 463 if strings.Contains(name, "-CT_") { | 423 if strings.Contains(name, "-CT_") { |
| 464 » » deps = append(deps, ctSKPs(cfg, name)) | 424 » » deps = append(deps, ctSKPs(b, name)) |
| 465 } | 425 } |
| 466 | 426 |
| 467 // Infra tests. | 427 // Infra tests. |
| 468 if name == "Housekeeper-PerCommit-InfraTests" { | 428 if name == "Housekeeper-PerCommit-InfraTests" { |
| 469 » » deps = append(deps, infra(cfg, name)) | 429 » » deps = append(deps, infra(b, name)) |
| 470 } | 430 } |
| 471 | 431 |
| 472 // Compile bots. | 432 // Compile bots. |
| 473 if parts["role"] == "Build" { | 433 if parts["role"] == "Build" { |
| 474 » » deps = append(deps, compile(cfg, name, parts)) | 434 » » deps = append(deps, compile(b, name, parts)) |
| 475 } | 435 } |
| 476 | 436 |
| 477 // Any remaining bots need a compile task. | 437 // Any remaining bots need a compile task. |
| 478 compileTaskName := deriveCompileTaskName(name, parts) | 438 compileTaskName := deriveCompileTaskName(name, parts) |
| 479 compileTaskParts, err := jobNameSchema.ParseJobName(compileTaskName) | 439 compileTaskParts, err := jobNameSchema.ParseJobName(compileTaskName) |
| 480 if err != nil { | 440 if err != nil { |
| 481 glog.Fatal(err) | 441 glog.Fatal(err) |
| 482 } | 442 } |
| 483 // Temporarily disable the Housekeeper's compile Task, since we aren't | 443 // Temporarily disable the Housekeeper's compile Task, since we aren't |
| 484 // yet running that Job. | 444 // yet running that Job. |
| 485 if parts["role"] != "Housekeeper" { | 445 if parts["role"] != "Housekeeper" { |
| 486 » » compile(cfg, compileTaskName, compileTaskParts) | 446 » » compile(b, compileTaskName, compileTaskParts) |
| 487 } | 447 } |
| 488 | 448 |
| 489 // Housekeeper. | 449 // Housekeeper. |
| 490         if parts["role"] == "Housekeeper" && name != "Housekeeper-PerCommit-InfraTests" { | 450         if parts["role"] == "Housekeeper" && name != "Housekeeper-PerCommit-InfraTests" { |
| 491 » » deps = append(deps, housekeeper(cfg, name, compileTaskName)) | 451 » » deps = append(deps, housekeeper(b, name, compileTaskName)) |
| 492 } | 452 } |
| 493 | 453 |
| 494 // Common assets needed by the remaining bots. | 454 // Common assets needed by the remaining bots. |
| 495 pkgs := []*specs.CipdPackage{ | 455 pkgs := []*specs.CipdPackage{ |
| 496 » » getCipdPackage("skimage"), | 456 » » b.MustGetCipdPackageFromAsset("skimage"), |
| 497 » » getCipdPackage("skp"), | 457 » » b.MustGetCipdPackageFromAsset("skp"), |
| 498 » » getCipdPackage("svg"), | 458 » » b.MustGetCipdPackageFromAsset("svg"), |
| 499 } | 459 } |
| 500 | 460 |
| 501 // Test bots. | 461 // Test bots. |
| 502 if parts["role"] == "Test" { | 462 if parts["role"] == "Test" { |
| 503 » » deps = append(deps, test(cfg, name, parts, compileTaskName, pkgs)) | 463 » » deps = append(deps, test(b, name, parts, compileTaskName, pkgs)) |
| 504 } | 464 } |
| 505 | 465 |
| 506 // Perf bots. | 466 // Perf bots. |
| 507 if parts["role"] == "Perf" { | 467 if parts["role"] == "Perf" { |
| 508 » » deps = append(deps, perf(cfg, name, parts, compileTaskName, pkgs)) | 468 » » deps = append(deps, perf(b, name, parts, compileTaskName, pkgs)) |
| 509 } | 469 } |
| 510 | 470 |
| 511 // Add the Job spec. | 471 // Add the Job spec. |
| 512 » cfg.Jobs[name] = &specs.JobSpec{ | 472 » b.MustAddJob(name, &specs.JobSpec{ |
| 513 Priority: 0.8, | 473 Priority: 0.8, |
| 514 TaskSpecs: deps, | 474 TaskSpecs: deps, |
| 515 » } | 475 » }) |
| 516 } | 476 } |
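
Worth noting: the old explicit guard against duplicate job names (glog.Fatalf("Duplicate job %q", name)) is gone from process(). The presumption is that MustAddJob enforces uniqueness and fails fast on its own; this diff only shows the call site, so that is an assumption. If it did not, the guard would have to come back in roughly this form:

    // Hypothetical re-creation of the dropped guard, shown only to make the
    // assumption explicit; not part of this CL.
    seen := map[string]bool{}
    for _, name := range JOBS {
            if seen[name] {
                    glog.Fatalf("Duplicate job %q", name)
            }
            seen[name] = true
            process(b, name)
    }
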
| 517 | 477 |
| 518 // getCheckoutRoot returns the path of the root of the Skia checkout, or an | 478 // getCheckoutRoot returns the path of the root of the Skia checkout, or an |
| 519 // error if it cannot be found. | 479 // error if it cannot be found. |
| 520 func getCheckoutRoot() string { | 480 func getCheckoutRoot() string { |
| 521 cwd, err := os.Getwd() | 481 cwd, err := os.Getwd() |
| 522 if err != nil { | 482 if err != nil { |
| 523 glog.Fatal(err) | 483 glog.Fatal(err) |
| 524 } | 484 } |
| 525 for { | 485 for { |
| 526 if _, err := os.Stat(cwd); err != nil { | 486 if _, err := os.Stat(cwd); err != nil { |
| 527 glog.Fatal(err) | 487 glog.Fatal(err) |
| 528 } | 488 } |
| 529 s, err := os.Stat(path.Join(cwd, ".git")) | 489 s, err := os.Stat(path.Join(cwd, ".git")) |
| 530 if err == nil && s.IsDir() { | 490 if err == nil && s.IsDir() { |
| 531 // TODO(borenet): Should we verify that this is a Skia | 491 // TODO(borenet): Should we verify that this is a Skia |
| 532 // checkout and not something else? | 492 // checkout and not something else? |
| 533 return cwd | 493 return cwd |
| 534 } | 494 } |
| 535 cwd = filepath.Clean(path.Join(cwd, "..")) | 495 cwd = filepath.Clean(path.Join(cwd, "..")) |
| 536 } | 496 } |
| 537 } | 497 } |
| 538 | 498 |
| 539 // Regenerate the tasks.json file. | 499 // Regenerate the tasks.json file. |
| 540 func main() { | 500 func main() { |
| 541 » common.Init() | 501 » b := specs.MustNewTasksCfgBuilder() |
| 542 » defer common.LogPanic() | |
| 543 | |
| 544 » // Where are we? | |
| 545 » root := getCheckoutRoot() | |
| 546 » infrabotsDir = path.Join(root, "infra", "bots") | |
| 547 | |
| 548 // Create the JobNameSchema. | 502 // Create the JobNameSchema. |
| 549 » schema, err := NewJobNameSchema(path.Join(infrabotsDir, "recipe_modules", "builder_name_schema", "builder_name_schema.json")) | 503 » schema, err := NewJobNameSchema(path.Join(b.CheckoutRoot(), "infra", "bots", "recipe_modules", "builder_name_schema", "builder_name_schema.json")) |
| 550 if err != nil { | 504 if err != nil { |
| 551 glog.Fatal(err) | 505 glog.Fatal(err) |
| 552 } | 506 } |
| 553 jobNameSchema = schema | 507 jobNameSchema = schema |
| 554 | 508 |
| 555 » // Create the config. | 509 » // Create Tasks and Jobs. |
| 556 » cfg := &specs.TasksCfg{ | 510 » for _, name := range JOBS { |
| 557 » » Jobs: map[string]*specs.JobSpec{}, | 511 » » process(b, name) |
| 558 » » Tasks: map[string]*specs.TaskSpec{}, | |
| 559 } | 512 } |
| 560 | 513 |
| 561 » // Create Tasks and Jobs. | 514 » b.MustFinish() |
| 562 » for _, j := range JOBS { | |
| 563 » » process(cfg, j) | |
| 564 » } | |
| 565 | |
| 566 » // Validate the config. | |
| 567 » if err := cfg.Validate(); err != nil { | |
| 568 » » glog.Fatal(err) | |
| 569 » } | |
| 570 | |
| 571 » // Write the tasks.json file. | |
| 572 » b, err := json.MarshalIndent(cfg, "", " ") | |
| 573 » if err != nil { | |
| 574 » » glog.Fatal(err) | |
| 575 » } | |
| 576 » // The json package escapes HTML characters, which makes our output | |
| 577 » // much less readable. Replace the escape characters with the real | |
| 578 » // character. | |
| 579 » b = bytes.Replace(b, []byte("\\u003c"), []byte("<"), -1) | |
| 580 | |
| 581 » outFile := path.Join(root, specs.TASKS_CFG_FILE) | |
| 582 » if *testing { | |
| 583 » » // Don't write the file; read it and compare. | |
| 584 » » expect, err := ioutil.ReadFile(outFile) | |
| 585 » » if err != nil { | |
| 586 » » » glog.Fatal(err) | |
| 587 » » } | |
| 588 » » if !bytes.Equal(expect, b) { | |
| 589 » » » glog.Fatalf("Expected no changes, but changes were found!") | |
| 590 » » } | |
| 591 » } else { | |
| 592 » » if err := ioutil.WriteFile(outFile, b, os.ModePerm); err != nil { | |
| 593 » » » glog.Fatal(err) | |
| 594 » » } | |
| 595 » } | |
| 596 } | 515 } |
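
For reference, everything the old tail of main() did by hand is now presumed to happen inside b.MustFinish(): validate the config, marshal it with two-space indentation, undo the JSON escaping of '<', and either write tasks.json or, under the removed -test flag, compare against the checked-in file. Condensed from the deleted lines above (not from the builder's actual implementation):

    // Requires the old imports: bytes, encoding/json, io/ioutil.
    if err := cfg.Validate(); err != nil {
            glog.Fatal(err)
    }
    out, err := json.MarshalIndent(cfg, "", "  ")
    if err != nil {
            glog.Fatal(err)
    }
    out = bytes.Replace(out, []byte("\\u003c"), []byte("<"), -1) // keep '<' readable
    outFile := path.Join(root, specs.TASKS_CFG_FILE)
    if *testing {
            // -test mode: fail if regeneration would change the file.
            expect, err := ioutil.ReadFile(outFile)
            if err != nil {
                    glog.Fatal(err)
            }
            if !bytes.Equal(expect, out) {
                    glog.Fatalf("Expected no changes, but changes were found!")
            }
    } else if err := ioutil.WriteFile(outFile, out, os.ModePerm); err != nil {
            glog.Fatal(err)
    }
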
| 597 | 516 |
| 598 // TODO(borenet): The below really belongs in its own file, probably next to the | 517 // TODO(borenet): The below really belongs in its own file, probably next to the |
| 599 // builder_name_schema.json file. | 518 // builder_name_schema.json file. |
| 600 | 519 |
| 601 // JobNameSchema is a struct used for (de)constructing Job names in a | 520 // JobNameSchema is a struct used for (de)constructing Job names in a |
| 602 // predictable format. | 521 // predictable format. |
| 603 type JobNameSchema struct { | 522 type JobNameSchema struct { |
| 604 Schema map[string][]string `json:"builder_name_schema"` | 523 Schema map[string][]string `json:"builder_name_schema"` |
| 605 Sep string `json:"builder_name_sep"` | 524 Sep string `json:"builder_name_sep"` |
| (...skipping 63 matching lines...) |
| 669 if !ok { | 588 if !ok { |
| 670                         return "", fmt.Errorf("Invalid job parts; missing %q", k) | 589                         return "", fmt.Errorf("Invalid job parts; missing %q", k) |
| 671 } | 590 } |
| 672 rvParts = append(rvParts, v) | 591 rvParts = append(rvParts, v) |
| 673 } | 592 } |
| 674 if _, ok := parts["extra_config"]; ok { | 593 if _, ok := parts["extra_config"]; ok { |
| 675 rvParts = append(rvParts, parts["extra_config"]) | 594 rvParts = append(rvParts, parts["extra_config"]) |
| 676 } | 595 } |
| 677 return strings.Join(rvParts, s.Sep), nil | 596 return strings.Join(rvParts, s.Sep), nil |
| 678 } | 597 } |
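
A quick illustrative round trip through JobNameSchema, using a job name that appears in this file; the actual part keys come from builder_name_schema.json, so the keys suggested in the comment ("compiler", "target_arch", "configuration") are assumptions about that schema rather than facts established here:

    parts, err := jobNameSchema.ParseJobName("Build-Ubuntu-GCC-x86_64-Release-Shared")
    if err != nil {
            glog.Fatal(err)
    }
    // Plausibly: map[role:Build os:Ubuntu compiler:GCC target_arch:x86_64
    //                configuration:Release extra_config:Shared]
    name, err := jobNameSchema.MakeJobName(parts)
    if err != nil {
            glog.Fatal(err)
    }
    // name round-trips back to "Build-Ubuntu-GCC-x86_64-Release-Shared".
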