Chromium Code Reviews

Side by Side Diff: scripts/slave/annotated_run.py

Issue 1501663002: annotated_run.py: Add LogDog bootstrapping. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/tools/build
Patch Set: Updated, actually works. Created 4 years, 11 months ago
1 #!/usr/bin/env python 1 #!/usr/bin/env python
2 # Copyright (c) 2013 The Chromium Authors. All rights reserved. 2 # Copyright (c) 2013 The Chromium Authors. All rights reserved.
3 # Use of this source code is governed by a BSD-style license that can be 3 # Use of this source code is governed by a BSD-style license that can be
4 # found in the LICENSE file. 4 # found in the LICENSE file.
5 5
6 import argparse 6 import argparse
7 import collections 7 import collections
8 import contextlib 8 import contextlib
9 import json 9 import json
10 import logging 10 import logging
11 import os 11 import os
12 import platform 12 import platform
13 import shutil 13 import shutil
14 import socket 14 import socket
15 import subprocess 15 import subprocess
16 import sys 16 import sys
17 import tempfile 17 import tempfile
18 18
19 19
20 # Install Infra build environment. 20 # Install Infra build environment.
21 BUILD_ROOT = os.path.dirname(os.path.dirname(os.path.dirname( 21 BUILD_ROOT = os.path.dirname(os.path.dirname(os.path.dirname(
22 os.path.abspath(__file__)))) 22 os.path.abspath(__file__))))
23 sys.path.insert(0, os.path.join(BUILD_ROOT, 'scripts')) 23 sys.path.insert(0, os.path.join(BUILD_ROOT, 'scripts'))
24 24
25 from common import annotator 25 from common import annotator
26 from common import chromium_utils 26 from common import chromium_utils
27 from common import env 27 from common import env
28 from common import master_cfg_utils 28 from common import master_cfg_utils
29 from slave import gce
29 30
30 # Logging instance. 31 # Logging instance.
31 LOGGER = logging.getLogger('annotated_run') 32 LOGGER = logging.getLogger('annotated_run')
32 33
34 # Return codes used by Butler/Annotee to indicate their failure (as opposed to
35 # a forwarded return code from the underlying process).
36 LOGDOG_ERROR_RETURNCODES = (
iannucci 2016/01/15 04:18:17 gross :(
dnj 2016/01/15 22:05:50 I can't think of a better way to do this part. I s
37 # Butler runtime error.
38 250,
39 # Annotee runtime error.
40 251,
41 )
42
43 # Sentinel value that, if present in master config, matches all builders
44 # underneath that master.
45 WHITELIST_ALL = '*'
46
47 # Whitelist of {master}=>[{builder}|WHITELIST_ALL] entries enabling specific
48 # masters and builders for experimental LogDog/Annotee export.
49 LOGDOG_WHITELIST_MASTER_BUILDERS = {
50 }
51
52 # Configuration for a Pub/Sub topic.
53 PubSubConfig = collections.namedtuple('PubSubConfig', ('project', 'topic'))
54
55 # LogDogPlatform is the set of platform-specific LogDog bootstrapping
56 # configuration parameters.
57 #
58 # See _logdog_get_streamserver_uri for "streamserver" parameter details.
59 LogDogPlatform = collections.namedtuple('LogDogPlatform', (
60 'butler', 'annotee', 'credential_path', 'streamserver',
61 ))
iannucci 2016/01/15 04:18:18 Join previous line?
dnj 2016/01/15 22:05:50 Done.
62
63 # A CIPD binary description, including the package name, version, and relative
64 # path of the binary within the package.
65 CipdBinary = collections.namedtuple('CipdBinary',
66 ('package', 'version', 'relpath'))
33 67
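For illustration, the CipdBinary tuple for the Linux Butler in PLATFORM_CONFIG below reads as: install the named CIPD package at version 'latest' and expect its executable at <install root>/logdog_butler. A minimal sketch, grounded in the configuration shown below:

  CipdBinary(package='infra/tools/luci/logdog/butler/linux-amd64',
             version='latest',
             relpath='logdog_butler')
  # _logdog_install_cipd() installs this package into its CIPD root and
  # returns os.path.join(<root>, 'logdog_butler') as the Butler binary path.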
34 # get_config() will probe this for values. 68 # get_config() will probe this for values.
35 # - First, (system, processor) 69 # - First, (system, processor)
36 # - Then, (system,) 70 # - Then, (system,)
37 # - Finally, (), 71 # - Finally, (),
38 PLATFORM_CONFIG = { 72 PLATFORM_CONFIG = {
39 # All systems. 73 # All systems.
40 (): {}, 74 (): {
75 'logdog_pubsub': PubSubConfig(
76 project='luci-logdog',
77 topic='logs',
78 ),
79 },
41 80
42 # Linux 81 # Linux
43 ('Linux',): { 82 ('Linux',): {
44 'run_cmd': ['/opt/infra-python/run.py'], 83 'run_cmd': ['/opt/infra-python/run.py'],
84 'logdog_platform': LogDogPlatform(
85 butler=CipdBinary('infra/tools/luci/logdog/butler/linux-amd64',
86 'latest', 'logdog_butler'),
87 annotee=CipdBinary('infra/tools/luci/logdog/annotee/linux-amd64',
88 'latest', 'logdog_annotee'),
89 credential_path=(
90 '/creds/service_accounts/service-account-luci-logdog-pubsub.json'),
91 streamserver='unix',
92 ),
45 }, 93 },
46 94
47 # Mac OSX 95 # Mac OSX
48 ('Darwin',): { 96 ('Darwin',): {
49 'run_cmd': ['/opt/infra-python/run.py'], 97 'run_cmd': ['/opt/infra-python/run.py'],
50 }, 98 },
51 99
52 # Windows 100 # Windows
53 ('Windows',): { 101 ('Windows',): {
54 'run_cmd': ['C:\\infra-python\\ENV\\Scripts\\python.exe', 102 'run_cmd': ['C:\\infra-python\\ENV\\Scripts\\python.exe',
55 'C:\\infra-python\\run.py'], 103 'C:\\infra-python\\run.py'],
56 }, 104 },
57 } 105 }
58 106
59 107
60 # Config is the runtime configuration used by `annotated_run.py` to bootstrap 108 # Config is the runtime configuration used by `annotated_run.py` to bootstrap
61 # the recipe engine. 109 # the recipe engine.
62 Config = collections.namedtuple('Config', ( 110 Config = collections.namedtuple('Config', (
63 'run_cmd', 111 'run_cmd',
112 'logdog_pubsub',
113 'logdog_platform',
64 )) 114 ))
65 115
66 116
117 class Runtime(object):
118 """Runtime is the runtime context of the recipe execution.
119
120 It is a ContextManager that tracks generated files and cleans them up at
121 exit.
122 """
123
124 def __init__(self, leak=False):
125 self._tempdirs = []
126 self._leak = leak
127
128 def cleanup(self, path):
129 self._tempdirs.append(path)
130
131 def tempdir(self, base=None):
132 """Creates a temporary recipe-local working directory and returns it.
133
134 This creates a temporary directory for this annotation run. Directory
135 cleanup is registered with this Runtime and performed when it is closed.
136
137 This creates two levels of directory:
138 <base>/.recipe_runtime
139 <base>/.recipe_runtime/tmpFOO
140
141 On termination, the entire "<base>/.recipe_runtime" directory is deleted,
142 removing the subdirectory created by this instance as well as cleaning up
143 any other temporary subdirectories leaked by previous executions.
144
145 Args:
146 base (str/None): The directory under which the tempdir should be
147 created. If None, the default temporary directory root will be
148 used.
149 """
150 base = base or tempfile.gettempdir()
151 basedir = ensure_directory(base, '.recipe_runtime')
152 self.cleanup(basedir)
153 tdir = tempfile.mkdtemp(dir=basedir)
154 return tdir
155
156 def __enter__(self):
157 return self
158
159 def __exit__(self, _et, _ev, _tb):
160 self.close()
161
162 def close(self):
163 if self._leak:
164 LOGGER.warning('(--leak) Leaking temporary paths: %s', self._tempdirs)
165 else:
166 for path in reversed(self._tempdirs):
167 try:
168 if os.path.isdir(path):
169 LOGGER.debug('Cleaning up temporary directory [%s].', path)
170 chromium_utils.RemoveDirectory(path)
171 except BaseException:
172 LOGGER.exception('Failed to clean up temporary directory [%s].',
173 path)
174 del(self._tempdirs[:])
175
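To illustrate how Runtime is meant to be used (a minimal sketch; the file name is just an example), main() below enters it as a context manager and asks it for a working directory:

  with Runtime(leak=False) as rt:
    # Creates <cwd>/.recipe_runtime/tmpXXXX and registers
    # <cwd>/.recipe_runtime for cleanup.
    workdir = rt.tempdir(base=os.getcwd())
    props_file = os.path.join(workdir, 'recipe_properties.json')
    # ... run the recipe engine using workdir ...
  # Unless --leak was passed, close() removes the whole .recipe_runtime tree,
  # including stale subdirectories leaked by previous runs.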
176
67 def get_config(): 177 def get_config():
68 """Returns (Config): The constructed Config object. 178 """Returns (Config): The constructed Config object.
69 179
70 The Config object is constructed from: 180 The Config object is constructed from:
71 - Cascading the PLATFORM_CONFIG fields together based on current 181 - Cascading the PLATFORM_CONFIG fields together based on current
72 OS/Architecture. 182 OS/Architecture.
73 183
74 Raises: 184 Raises:
75 KeyError: if a required configuration key/parameter is not available. 185 KeyError: if a required configuration key/parameter is not available.
76 """ 186 """
77 # Cascade the platform configuration. 187 # Cascade the platform configuration.
78 p = (platform.system(), platform.processor()) 188 p = (platform.system(), platform.processor())
79 platform_config = {} 189 platform_config = {}
80 for i in xrange(len(p)+1): 190 for i in xrange(len(p)+1):
81 platform_config.update(PLATFORM_CONFIG.get(p[:i], {})) 191 platform_config.update(PLATFORM_CONFIG.get(p[:i], {}))
82 192
83 # Construct runtime configuration. 193 # Construct runtime configuration.
84 return Config( 194 return Config(
85 run_cmd=platform_config.get('run_cmd'), 195 run_cmd=platform_config.get('run_cmd'),
196 logdog_pubsub=platform_config.get('logdog_pubsub'),
197 logdog_platform=platform_config.get('logdog_platform'),
86 ) 198 )
87 199
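As a concrete example of the cascade above (the processor string is an assumption; platform.processor() varies by machine), on a 64-bit Linux bot get_config() merges three keys, most specific last:

  # p == ('Linux', 'x86_64')
  # merge order: PLATFORM_CONFIG[()], PLATFORM_CONFIG[('Linux',)],
  #              PLATFORM_CONFIG[('Linux', 'x86_64')]  (absent, adds nothing)
  config = get_config()
  # config.run_cmd         == ['/opt/infra-python/run.py']
  # config.logdog_pubsub   == PubSubConfig(project='luci-logdog', topic='logs')
  # config.logdog_platform == the Linux LogDogPlatform entry above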
88 200
89 def ensure_directory(*path): 201 def ensure_directory(*path):
90 path = os.path.join(*path) 202 path = os.path.join(*path)
91 if not os.path.isdir(path): 203 if not os.path.isdir(path):
92 os.makedirs(path) 204 os.makedirs(path)
93 return path 205 return path
94 206
95 207
208 def _logdog_get_streamserver_uri(rt, typ):
209 """Returns (str): The Butler StreamServer URI.
210
211 Args:
212 rt (Runtime): Process-wide runtime.
213 typ (str): The type of URI to generate. One of: ['unix'].
214 Raises:
215 LogDogBootstrapError: if |typ| is not a known type.
216 """
217 if typ == 'unix':
218 # We have to use a custom temporary directory here. This is due to the path
219 # length limitation on UNIX domain sockets, which is generally 104-108
220 # characters. We can't make that assumption about our standard recipe
221 # temporary directory.
222 sockdir = rt.tempdir()
223 uri = 'unix:%s' % (os.path.join(sockdir, 'butler.sock'),)
224 if len(uri) > 104:
225 raise LogDogBootstrapError('Generated URI exceeds UNIX domain socket '
226 'name size: %s' % (uri,))
227 return uri
228 raise LogDogBootstrapError('No streamserver URI generator.')
229
230
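For example (a sketch; the temporary path is made up), on Linux the 'unix' stream server type yields a socket URI rooted in a short recipe-local temporary directory:

  uri = _logdog_get_streamserver_uri(rt, 'unix')
  # e.g. 'unix:/tmp/.recipe_runtime/tmpAbC123/butler.sock'
  # A URI longer than 104 characters raises LogDogBootstrapError, since UNIX
  # domain socket paths are limited to roughly 104-108 characters.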
96 def _run_command(cmd, **kwargs): 231 def _run_command(cmd, **kwargs):
97 if kwargs.pop('dry_run', False): 232 if kwargs.pop('dry_run', False):
98 LOGGER.info('(Dry Run) Would have executed command: %s', cmd) 233 LOGGER.info('(Dry Run) Would have executed command: %s', cmd)
99 return 0, '' 234 return 0, ''
100 235
101 LOGGER.debug('Executing command: %s', cmd) 236 LOGGER.debug('Executing command: %s', cmd)
102 kwargs.setdefault('stderr', subprocess.STDOUT) 237 kwargs.setdefault('stderr', subprocess.STDOUT)
103 proc = subprocess.Popen(cmd, **kwargs) 238 proc = subprocess.Popen(cmd, **kwargs)
104 stdout, _ = proc.communicate() 239 stdout, _ = proc.communicate()
105 240
106 LOGGER.debug('Process [%s] returned [%d] with output:\n%s', 241 LOGGER.debug('Process [%s] returned [%d] with output:\n%s',
107 cmd, proc.returncode, stdout) 242 cmd, proc.returncode, stdout)
108 return proc.returncode, stdout 243 return proc.returncode, stdout
109 244
110 245
111 def _check_command(cmd, **kwargs): 246 def _check_command(cmd, **kwargs):
112 rv, stdout = _run_command(cmd, **kwargs) 247 rv, stdout = _run_command(cmd, **kwargs)
113 if rv != 0: 248 if rv != 0:
114 raise subprocess.CalledProcessError(rv, cmd, output=stdout) 249 raise subprocess.CalledProcessError(rv, cmd, output=stdout)
115 return stdout 250 return stdout
116 251
117 252
118 @contextlib.contextmanager 253 class LogDogNotBootstrapped(Exception):
119 def recipe_tempdir(root=None, leak=False): 254 pass
120 """Creates a temporary recipe-local working directory and yields it. 255
121 256
122 This creates a temporary directory for this annotation run that is 257 class LogDogBootstrapError(Exception):
123 automatically cleaned up. It returns the directory. 258 pass
124 259
125 Args: 260
126 root (str/None): If not None, the root directory. Otherwise, |os.cwd| will 261 def is_executable(path):
127 be used. 262 return os.path.isfile(path) and os.access(path, os.X_OK)
iannucci 2016/01/15 04:18:18 why not just os.access(path, os.X_OK) and catch th
dnj 2016/01/15 22:05:50 Actually apparently nothing uses this anymore.
128 leak (bool): If true, don't clean up the temporary directory on exit. 263
129 """ 264
130 basedir = ensure_directory((root or os.getcwd()), '.recipe_runtime') 265 def ensure_directory(*path):
266 path = os.path.join(*path)
267 if not os.path.isdir(path):
268 os.makedirs(path)
269 return path
270
271
272 def _get_service_account_json(opts, credential_path):
273 """Returns (str/None): If specified, the path to the service account JSON.
274
275 This method probes the local environment and returns the path to the
276 service account credentials to use for Butler authentication, if any.
277
278 If we're running on a GCE instance, None will be returned, as the GCE
279 service account is implicitly authenticated. If we're running on bare
280 metal, the path to the local service account credentials will be returned.
281
282 Args:
283 opts (argparse.Namespace): Command-line options.
284 Raises:
285 |LogDogBootstrapError| if no credentials could be found.
286 """
287 path = opts.logdog_service_account_json
288 if path:
289 return path
290
291 if gce.Authenticator.is_gce():
292 LOGGER.info('Running on GCE. No credentials necessary.')
293 return None
294
295 if os.path.isfile(credential_path):
296 return credential_path
297
298 raise LogDogBootstrapError('Could not find service account credentials. '
299 'Tried: %s' % (credential_path,))
300
301
302 def _logdog_install_cipd(path, *packages):
303 """Returns (list): The paths to the binaries in each of the packages.
304
305 This method bootstraps CIPD in "path", installing the packages specified
306 by "packages" and returning the paths to their binaries.
307
308 Args:
309 path (str): The CIPD installation root.
310 packages (CipdBinary): The set of CIPD binary packages to install.
311 """
312 verbosity = 0
313 level = logging.getLogger().level
314 if level <= logging.INFO:
315 verbosity += 1
316 if level <= logging.DEBUG:
317 verbosity += 1
318
319 packages_path = os.path.join(path, 'packages.json')
320 pmap = {}
321 cmd = [
322 sys.executable,
323 os.path.join(env.Build, 'scripts', 'slave', 'cipd.py'),
324 '--dest-directory', path,
325 '--json-output', packages_path,
326 ] + (['--verbose'] * verbosity)
327 for p in packages:
328 cmd += ['-P', '%s@%s' % (p.package, p.version)]
329 pmap[p.package] = os.path.join(path, p.relpath)
330
131 try: 331 try:
132 tdir = tempfile.mkdtemp(dir=basedir) 332 _check_command(cmd)
133 yield tdir 333 except subprocess.CalledProcessError:
134 finally: 334 LOGGER.exception('Failed to install LogDog CIPD packages.')
135 if basedir and os.path.isdir(basedir): 335 raise LogDogBootstrapError()
136 if not leak: 336
137 LOGGER.debug('Cleaning up temporary directory [%s].', basedir) 337 # Resolve installed packages.
138 try: 338 return tuple(pmap[p.package] for p in packages)
139 chromium_utils.RemoveDirectory(basedir) 339
140 except Exception: 340
141 LOGGER.exception('Failed to clean up temporary directory [%s].', 341 def _build_logdog_prefix(properties):
dnj 2016/01/14 22:50:28 This translates the master, builder, and build num
142 basedir) 342 """Constructs a LogDog stream prefix from the supplied properties.
343
344 The returned prefix is of the form:
345 bb/<mastername>/<buildername>/<buildnumber>
346
347 Any path-incompatible characters will be flattened to underscores.
348 """
349 def normalize(s):
350 parts = []
351 for ch in str(s):
352 if ch.isalnum() or ch in ':_-.':
353 parts.append(ch)
143 else: 354 else:
144 LOGGER.warning('(--leak) Leaking temporary directory [%s].', basedir) 355 parts.append('_')
356 if not parts[0].isalnum():
357 parts.insert(0, 's_')
358 return ''.join(parts)
359
360 components = {}
361 for f in ('mastername', 'buildername', 'buildnumber'):
362 prop = properties.get(f)
363 if not prop:
364 raise LogDogBootstrapError('Missing build property [%s].' % (f,))
365 components[f] = normalize(properties.get(f))
366 return 'bb/%(mastername)s/%(buildername)s/%(buildnumber)s' % components
367
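For example (the master and builder names here are hypothetical), the normalization above flattens spaces and parentheses to underscores:

  props = {
      'mastername': 'chromium.fyi',        # hypothetical
      'buildername': 'Linux Tests (dbg)',  # hypothetical
      'buildnumber': 123,
  }
  _build_logdog_prefix(props)
  # -> 'bb/chromium.fyi/Linux_Tests__dbg_/123'
  # A missing mastername/buildername/buildnumber raises LogDogBootstrapError.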
368
369 def _logdog_bootstrap(rt, opts, tempdir, config, properties, cmd):
370 """Executes the recipe engine, bootstrapping it through LogDog/Annotee.
371
372 This method executes the recipe engine, bootstrapping it through
373 LogDog/Annotee so its output and annotations are streamed to LogDog. The
374 bootstrap is configured to tee the annotations through STDOUT/STDERR so they
375 will still be sent to BuildBot.
376
377 The overall setup here is:
378 [annotated_run.py] => [logdog_butler] => [logdog_annotee] => [recipes.py]
379
380 Args:
381 rt (Runtime): Process-wide runtime.
382 opts (argparse.Namespace): Command-line options.
383 tempdir (str): The path to the session temporary directory.
384 config (Config): Recipe runtime configuration.
385 properties (dict): Build properties.
386 cmd (list): The recipe runner command list to bootstrap.
387
388 Returns (int): The return code of the recipe runner process.
389
390 Raises:
391 LogDogNotBootstrapped: if the recipe engine was not executed because the
392 LogDog bootstrap requirements are not available.
393 LogDogBootstrapError: if there was an error bootstrapping the recipe runner
394 through LogDog.
395 """
396 bootstrap_dir = ensure_directory(tempdir, 'logdog_bootstrap')
397
398 plat = config.logdog_platform
399 if not plat:
400 raise LogDogNotBootstrapped('LogDog platform is not configured.')
401
402 # Determine LogDog prefix.
403 prefix = _build_logdog_prefix(properties)
404
405 # TODO(dnj): Consider moving this to a permanent directory on the bot so we
406 # don't CIPD-refresh each time.
407 cipd_path = os.path.join(bootstrap_dir, 'cipd')
408 butler, annotee = _logdog_install_cipd(cipd_path, plat.butler, plat.annotee)
409 if opts.logdog_butler_path:
410 butler = opts.logdog_butler_path
411 if opts.logdog_annotee_path:
412 annotee = opts.logdog_annotee_path
413
414 if not config.logdog_pubsub:
415 raise LogDogNotBootstrapped('No Pub/Sub configured.')
416 if not config.logdog_pubsub.project:
417 raise LogDogNotBootstrapped('No Pub/Sub project configured.')
418 if not config.logdog_pubsub.topic:
419 raise LogDogNotBootstrapped('No Pub/Sub topic configured.')
420
421 # Determine LogDog verbosity.
422 logdog_verbose = []
423 if opts.logdog_verbose == 0:
424 pass
425 elif opts.logdog_verbose == 1:
426 logdog_verbose.append('-log_level=info')
427 else:
428 logdog_verbose.append('-log_level=debug')
429
430 service_account_args = []
431 service_account_json = _get_service_account_json(opts, plat.credential_path)
432 if service_account_json:
433 service_account_args += ['-service-account-json', service_account_json]
434
435 # Generate our Butler stream server URI.
436 streamserver_uri = _logdog_get_streamserver_uri(rt, plat.streamserver)
iannucci 2016/01/15 04:18:17 what cleans this socket file up?
dnj 2016/01/15 22:05:50 The Butler should delete it, but it's also built w
437
438 # Dump the bootstrapped Annotee command to JSON for Annotee to load.
439 #
440 # Annotee can accept bootstrap parameters through either JSON or
441 # command-line, but using JSON effectively steps around any sort of command-
442 # line length limits such as those experienced on Windows.
443 cmd_json = os.path.join(bootstrap_dir, 'annotee_cmd.json')
444 with open(cmd_json, 'w') as fd:
445 json.dump(cmd, fd)
446
447 # Butler Command.
448 cmd = [
449 butler,
450 '-prefix', prefix,
451 '-output', 'pubsub,project="%(project)s",topic="%(topic)s"' % (
452 config.logdog_pubsub._asdict()),
453 ]
454 cmd += logdog_verbose
455 cmd += service_account_args
456 cmd += [
457 'run',
458 '-stdout', 'tee=stdout',
459 '-stderr', 'tee=stderr',
460 '-streamserver-uri', streamserver_uri,
461 '--',
462 ]
463
464 # Annotee Command.
465 cmd += [
466 annotee,
467 '-butler-stream-server', streamserver_uri,
468 '-annotate', 'tee',
469 '-name-base', 'recipes',
470 '-print-summary',
471 '-tee',
iannucci 2016/01/15 04:18:17 this tees everything currently, right? Later we co
dnj 2016/01/15 22:05:50 Exactly.
472 '-json-args-path', cmd_json,
473 ]
474 cmd += logdog_verbose
475
476 rv, _ = _run_command(cmd, dry_run=opts.dry_run)
477 if rv in LOGDOG_ERROR_RETURNCODES:
478 raise LogDogBootstrapError('LogDog Error (%d)' % (rv,))
479 return rv
480
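The Pub/Sub '-output' flag above is rendered from the PubSubConfig namedtuple via _asdict(); a minimal, runnable sketch of that string and of the resulting process chain (paths in the comment are illustrative):

  import collections

  PubSubConfig = collections.namedtuple('PubSubConfig', ('project', 'topic'))
  pubsub = PubSubConfig(project='luci-logdog', topic='logs')
  output = 'pubsub,project="%(project)s",topic="%(topic)s"' % pubsub._asdict()
  # output == 'pubsub,project="luci-logdog",topic="logs"'
  #
  # The bootstrapped invocation then has the shape:
  #   logdog_butler -prefix <prefix> -output <output> run ... --
  #     logdog_annotee -butler-stream-server <uri> -json-args-path <json>
  #       python -u recipes.py run ...  (read from annotee_cmd.json)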
481
482 def _should_run_logdog(properties):
483 """Returns (bool): True if LogDog should be used for this run.
484
485 Args:
486 properties (dict): The factory properties for this recipe run.
487 """
488 mastername = properties.get('mastername')
489 buildername = properties.get('buildername')
490 if not all((mastername, buildername)):
491 LOGGER.warning('Required mastername/buildername is not set.')
492 return False
493
494 # Key on mastername.
495 bdict = LOGDOG_WHITELIST_MASTER_BUILDERS.get(mastername)
496 if bdict is not None:
497 # Key on buildername.
498 if WHITELIST_ALL in bdict or buildername in bdict:
499 LOGGER.info('Whitelisted master %s, builder %s.',
500 mastername, buildername)
501 return True
502
503 LOGGER.info('Master %s, builder %s is not whitelisted for LogDog.',
504 mastername, buildername)
505 return False
145 506
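To make the whitelist semantics concrete (the master and builder names below are hypothetical; the real LOGDOG_WHITELIST_MASTER_BUILDERS above is currently empty):

  LOGDOG_WHITELIST_MASTER_BUILDERS = {
      'chromium.infra': {WHITELIST_ALL},            # every builder on this master
      'chromium.fyi': {'Linux Tests', 'Mac Tests'}, # only these builders
  }
  # _should_run_logdog({'mastername': 'chromium.fyi',
  #                     'buildername': 'Linux Tests'}) -> True
  # Any master absent from the dict falls through to the final return False.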
146 507
147 def get_recipe_properties(workdir, build_properties, 508 def get_recipe_properties(workdir, build_properties,
148 use_factory_properties_from_disk): 509 use_factory_properties_from_disk):
149 """Constructs the recipe's properties from buildbot's properties. 510 """Constructs the recipe's properties from buildbot's properties.
150 511
151 This retrieves the current factory properties from the master_config 512 This retrieves the current factory properties from the master_config
152 in the slave's checkout (no factory properties are handed to us from the 513 in the slave's checkout (no factory properties are handed to us from the
153 master), and merges in the build properties. 514 master), and merges in the build properties.
154 515
(...skipping 135 matching lines...)
290 help='factory properties in b64 gz JSON format') 651 help='factory properties in b64 gz JSON format')
291 parser.add_argument('--keep-stdin', action='store_true', default=False, 652 parser.add_argument('--keep-stdin', action='store_true', default=False,
292 help='don\'t close stdin when running recipe steps') 653 help='don\'t close stdin when running recipe steps')
293 parser.add_argument('--master-overrides-slave', action='store_true', 654 parser.add_argument('--master-overrides-slave', action='store_true',
294 help='use the property values given on the command line from the master, ' 655 help='use the property values given on the command line from the master, '
295 'not the ones looked up on the slave') 656 'not the ones looked up on the slave')
296 parser.add_argument('--use-factory-properties-from-disk', 657 parser.add_argument('--use-factory-properties-from-disk',
297 action='store_true', default=False, 658 action='store_true', default=False,
298 help='use factory properties loaded from disk on the slave') 659 help='use factory properties loaded from disk on the slave')
299 660
661 group = parser.add_argument_group('LogDog Bootstrap')
662 group.add_argument('--logdog-verbose',
663 action='count', default=0,
664 help='Increase LogDog verbosity. This can be specified multiple times.')
665 group.add_argument('--logdog-force', action='store_true',
666 help='Force LogDog bootstrapping, even if the system is not configured.')
667 group.add_argument('--logdog-butler-path',
668 help='Path to the LogDog Butler. If empty, one will be probed/downloaded '
669 'from CIPD.')
670 group.add_argument('--logdog-annotee-path',
671 help='Path to the LogDog Annotee. If empty, one will be '
672 'probed/downloaded from CIPD.')
673 group.add_argument('--logdog-service-account-json',
674 help='Path to the service account JSON. If one is not provided, the '
675 'local system credentials will be used.')
676
300 return parser.parse_args(argv) 677 return parser.parse_args(argv)
301 678
302 679
303 def update_scripts(): 680 def update_scripts():
304 if os.environ.get('RUN_SLAVE_UPDATED_SCRIPTS'): 681 if os.environ.get('RUN_SLAVE_UPDATED_SCRIPTS'):
305 os.environ.pop('RUN_SLAVE_UPDATED_SCRIPTS') 682 os.environ.pop('RUN_SLAVE_UPDATED_SCRIPTS')
306 return False 683 return False
307 684
308 stream = annotator.StructuredAnnotationStream() 685 stream = annotator.StructuredAnnotationStream()
309 686
310 with stream.step('update_scripts') as s: 687 with stream.step('update_scripts') as s:
311 gclient_name = 'gclient' 688 gclient_name = 'gclient'
312 if sys.platform.startswith('win'): 689 if sys.platform.startswith('win'):
313 gclient_name += '.bat' 690 gclient_name += '.bat'
314 gclient_path = os.path.join(env.Build, '..', 'depot_tools', 691 gclient_path = os.path.join(env.Build, os.pardir, 'depot_tools',
315 gclient_name) 692 gclient_name)
316 gclient_cmd = [gclient_path, 'sync', '--force', '--verbose', '--jobs=2'] 693 gclient_cmd = [gclient_path, 'sync', '--force', '--verbose', '--jobs=2']
317 try: 694 try:
318 fd, output_json = tempfile.mkstemp() 695 fd, output_json = tempfile.mkstemp()
319 os.close(fd) 696 os.close(fd)
320 gclient_cmd += ['--output-json', output_json] 697 gclient_cmd += ['--output-json', output_json]
321 except Exception: 698 except Exception:
322 # Super paranoia try block. 699 # Super paranoia try block.
323 output_json = None 700 output_json = None
324 cmd_dict = { 701 cmd_dict = {
(...skipping 92 matching lines...)
417 ] 794 ]
418 # Add this conditionally so that we get an error in 795 # Add this conditionally so that we get an error in
419 # send_monitoring_event log files in case it isn't present. 796 # send_monitoring_event log files in case it isn't present.
420 if hostname: 797 if hostname:
421 cmd += ['--build-event-hostname', hostname] 798 cmd += ['--build-event-hostname', hostname]
422 _check_command(cmd) 799 _check_command(cmd)
423 except Exception: 800 except Exception:
424 LOGGER.warning("Failed to send monitoring event.", exc_info=True) 801 LOGGER.warning("Failed to send monitoring event.", exc_info=True)
425 802
426 803
804 def _exec_recipe(rt, opts, tdir, config, properties):
805 # Find out if the recipe we intend to run is in build_internal's recipes. If
806 # so, use recipes.py from there, otherwise use the one from build.
807 recipe_file = properties['recipe'].replace('/', os.path.sep) + '.py'
808
809 # Use the standard recipe runner unless the recipes are explicitly in the
810 # "build_limited" repository.
811 recipe_runner = os.path.join(env.Build,
812 'scripts', 'slave', 'recipes.py')
813 if env.BuildInternal:
814 build_limited = os.path.join(env.BuildInternal, 'scripts', 'slave')
815 if os.path.exists(os.path.join(build_limited, 'recipes', recipe_file)):
816 recipe_runner = os.path.join(build_limited, 'recipes.py')
817
818 # Dump properties to JSON and build recipe command.
819 props_file = os.path.join(tdir, 'recipe_properties.json')
820 with open(props_file, 'w') as fh:
821 json.dump(properties, fh)
822
823 cmd = [
824 sys.executable, '-u', recipe_runner,
825 'run',
826 '--workdir=%s' % os.getcwd(),
827 '--properties-file=%s' % props_file,
828 properties['recipe'],
829 ]
830
831 status = None
832 try:
833 if opts.logdog_force or _should_run_logdog(properties):
834 status = _logdog_bootstrap(rt, opts, tdir, config, properties, cmd)
iannucci 2016/01/15 04:18:18 can this ever return None even after running the r
dnj 2016/01/15 22:05:50 It currently only raises Exceptions or returns the
835 except LogDogNotBootstrapped as e:
836 LOGGER.info('Not bootstrapped: %s', e.message)
837 except LogDogBootstrapError as e:
838 LOGGER.warning('Could not bootstrap LogDog: %s', e.message)
839 except Exception as e:
840 LOGGER.exception('Exception while bootstrapping LogDog.')
841 finally:
842 if status is None:
843 LOGGER.info('Not using LogDog. Invoking `recipes.py` directly.')
844 status, _ = _run_command(cmd, dry_run=opts.dry_run)
845
846 return status
847
848
427 def main(argv): 849 def main(argv):
428 opts = get_args(argv) 850 opts = get_args(argv)
429 851
430 if opts.verbose == 0: 852 if opts.verbose == 0:
431 level = logging.INFO 853 level = logging.INFO
432 else: 854 else:
433 level = logging.DEBUG 855 level = logging.DEBUG
434 logging.getLogger().setLevel(level) 856 logging.getLogger().setLevel(level)
435 857
436 clean_old_recipe_engine() 858 clean_old_recipe_engine()
437 859
438 # Enter our runtime environment. 860 # Enter our runtime environment.
439 with recipe_tempdir(leak=opts.leak) as tdir: 861 with Runtime(leak=opts.leak) as rt:
862 tdir = rt.tempdir(os.getcwd())
440 LOGGER.debug('Using temporary directory: [%s].', tdir) 863 LOGGER.debug('Using temporary directory: [%s].', tdir)
441 864
442 # Load factory properties and configuration. 865 # Load factory properties and configuration.
443 # TODO(crbug.com/551165): remove flag "factory_properties". 866 # TODO(crbug.com/551165): remove flag "factory_properties".
444 use_factory_properties_from_disk = (opts.use_factory_properties_from_disk or 867 use_factory_properties_from_disk = (opts.use_factory_properties_from_disk or
445 bool(opts.factory_properties)) 868 bool(opts.factory_properties))
446 properties = get_recipe_properties( 869 properties = get_recipe_properties(
447 tdir, opts.build_properties, use_factory_properties_from_disk) 870 tdir, opts.build_properties, use_factory_properties_from_disk)
448 LOGGER.debug('Loaded properties: %s', properties) 871 LOGGER.debug('Loaded properties: %s', properties)
449 872
450 config = get_config() 873 config = get_config()
451 LOGGER.debug('Loaded runtime configuration: %s', config) 874 LOGGER.debug('Loaded runtime configuration: %s', config)
452 875
453 # Find out if the recipe we intend to run is in build_internal's recipes. If
454 # so, use recipes.py from there, otherwise use the one from build.
455 recipe_file = properties['recipe'].replace('/', os.path.sep) + '.py'
456
457 # Use the standard recipe runner unless the recipes are explicitly in the
458 # "build_limited" repository.
459 recipe_runner = os.path.join(env.Build,
460 'scripts', 'slave', 'recipes.py')
461 if env.BuildInternal:
462 build_limited = os.path.join(env.BuildInternal, 'scripts', 'slave')
463 if os.path.exists(os.path.join(build_limited, 'recipes', recipe_file)):
464 recipe_runner = os.path.join(build_limited, 'recipes.py')
465
466 # Setup monitoring directory and send a monitoring event. 876 # Setup monitoring directory and send a monitoring event.
467 build_data_dir = ensure_directory(tdir, 'build_data') 877 build_data_dir = ensure_directory(tdir, 'build_data')
468 properties['build_data_dir'] = build_data_dir 878 properties['build_data_dir'] = build_data_dir
469 879
470 # Write our annotated_run.py monitoring event. 880 # Write our annotated_run.py monitoring event.
471 write_monitoring_event(config, build_data_dir, properties) 881 write_monitoring_event(config, build_data_dir, properties)
472 882
473 # Dump properties to JSON and build recipe command. 883 # Execute our recipe.
474 props_file = os.path.join(tdir, 'recipe_properties.json') 884 return _exec_recipe(rt, opts, tdir, config, properties)
475 with open(props_file, 'w') as fh:
476 json.dump(properties, fh)
477 cmd = [
478 sys.executable, '-u', recipe_runner,
479 'run',
480 '--workdir=%s' % os.getcwd(),
481 '--properties-file=%s' % props_file,
482 properties['recipe'],
483 ]
484
485 status, _ = _run_command(cmd, dry_run=opts.dry_run)
486
487 return status
488 885
489 886
490 def shell_main(argv): 887 def shell_main(argv):
491 if update_scripts(): 888 if update_scripts():
492 # Re-execute with the updated annotated_run.py. 889 # Re-execute with the updated annotated_run.py.
493 rv, _ = _run_command([sys.executable] + argv) 890 rv, _ = _run_command([sys.executable] + argv)
494 return rv 891 return rv
495 else: 892 else:
496 return main(argv[1:]) 893 return main(argv[1:])
497 894
498 895
499 if __name__ == '__main__': 896 if __name__ == '__main__':
500 logging.basicConfig(level=logging.INFO) 897 logging.basicConfig(level=logging.INFO)
501 sys.exit(shell_main(sys.argv)) 898 sys.exit(shell_main(sys.argv))