OLD | NEW |
---|---|
1 #!/usr/bin/env python | 1 #!/usr/bin/env python |
2 # Copyright (c) 2013 The Chromium Authors. All rights reserved. | 2 # Copyright (c) 2013 The Chromium Authors. All rights reserved. |
3 # Use of this source code is governed by a BSD-style license that can be | 3 # Use of this source code is governed by a BSD-style license that can be |
4 # found in the LICENSE file. | 4 # found in the LICENSE file. |
5 | 5 |
6 import argparse | |
7 import collections | |
6 import contextlib | 8 import contextlib |
9 import datetime | |
10 import hashlib | |
11 import itertools | |
7 import json | 12 import json |
8 import optparse | 13 import logging |
9 import os | 14 import os |
15 import platform | |
10 import shutil | 16 import shutil |
11 import socket | 17 import socket |
12 import subprocess | 18 import subprocess |
13 import sys | 19 import sys |
14 import tempfile | 20 import tempfile |
15 import traceback | 21 |
16 | 22 |
23 # Install Infra build environment. | |
17 BUILD_ROOT = os.path.dirname(os.path.dirname(os.path.dirname( | 24 BUILD_ROOT = os.path.dirname(os.path.dirname(os.path.dirname( |
18 os.path.abspath(__file__)))) | 25 os.path.abspath(__file__)))) |
19 sys.path.append(os.path.join(BUILD_ROOT, 'scripts')) | 26 sys.path.insert(0, os.path.join(BUILD_ROOT, 'scripts')) |
20 sys.path.append(os.path.join(BUILD_ROOT, 'third_party')) | 27 import common.env |
28 common.env.Install() | |
21 | 29 |
22 from common import annotator | 30 from common import annotator |
23 from common import chromium_utils | 31 from common import chromium_utils |
24 from common import master_cfg_utils | 32 from common import master_cfg_utils |
25 | 33 from gerrit_util import GceAuthenticator |
26 SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__)) | 34 |
27 BUILD_LIMITED_ROOT = os.path.join( | 35 SCRIPT_PATH = os.path.join(common.env.Build, 'scripts', 'slave') |
28 os.path.dirname(BUILD_ROOT), 'build_internal', 'scripts', 'slave') | 36 BUILD_LIMITED_ROOT = os.path.join(common.env.BuildInternal, 'scripts', 'slave') |
29 | 37 |
30 PACKAGE_CFG = os.path.join( | 38 # Logging instance. |
31 os.path.dirname(os.path.dirname(SCRIPT_PATH)), | 39 LOGGER = logging.getLogger('annotated_run') |
32 'infra', 'config', 'recipes.cfg') | 40 |
33 | 41 # Return codes used by Butler/Annotee to indicate their failure (as opposed to |
34 if sys.platform.startswith('win'): | 42 # a forwarded return code from the underlying process). |
35 # TODO(pgervais): add windows support | 43 LOGDOG_ERROR_RETURNCODES = ( |
iannucci
2015/12/01 02:38:01
I think the logdog stuff should be in a different
| |
36 # QQ: Where is infra/run.py on windows machines? | 44 # Butler runtime error. |
37 RUN_CMD = None | 45 250, |
38 else: | 46 # Annotee runtime error. |
39 RUN_CMD = os.path.join('/', 'opt', 'infra-python', 'run.py') | 47 251, |
40 | 48 ) |
41 @contextlib.contextmanager | 49 |
42 def namedTempFile(): | 50 # Whitelist of {master}=>[{builder}|WHITELIST_ALL] whitelisting specific masters |
iannucci
2015/12/01 02:38:01
is {builder} a set?
dnj
2015/12/01 03:36:04
I was using that notation to mean "key", but I can
| |
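For illustration, a hypothetical entry under the notation being discussed -- a
master name keyed to a set of builder names, or to a set containing
WHITELIST_ALL (the names here are made up; the real table below ships empty):

    LOGDOG_WHITELIST_MASTER_BUILDERS = {
      'chromium.fyi': set(['Linux Tests']),              # specific builders
      'tryserver.chromium.linux': set([WHITELIST_ALL]),  # every builder
    }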
43 fd, name = tempfile.mkstemp() | 51 # and builders for experimental LogDog/Annotee export. |
44 os.close(fd) # let the exceptions fly | 52 LOGDOG_WHITELIST_MASTER_BUILDERS = { |
45 try: | 53 } |
46 yield name | 54 |
47 finally: | 55 # Sentinel value that, if present in master config, matches all builders |
56 # underneath that master. | |
57 WHITELIST_ALL = '*' | |
58 | |
59 # Configuration for a Pub/Sub topic. | |
60 PubSubConfig = collections.namedtuple('PubSubConfig', ('project', 'topic')) | |
61 | |
62 # RecipeRuntime will probe this for values. | |
63 # - First, (system, platform) | |
64 # - Then, (system,) | |
65 # - Finally, (), | |
66 PLATFORM_CONFIG = { | |
67 # All systems. | |
68 (): { | |
69 'logdog_pubsub': PubSubConfig( | |
70 project='luci-logdog', | |
71 topic='chrome-infra-beta', | |
72 ), | |
73 }, | |
74 | |
75 # Linux | |
76 ('Linux',): { | |
77 'run_cmd': '/opt/infra-python/run.py', | |
78 'cipd_static_paths': ( | |
79 # XXX: Get this right? | |
80 '/opt/chrome-infra', | |
81 ), | |
82 'credential_paths': ( | |
83 # XXX: Get this right? | |
84 '/opt/infra/service_accounts', | |
85 ), | |
86 'logdog_butler_streamserver_gen': lambda rt: os.path.join(rt.workdir, | |
87 'butler.sock'), | |
88 }, | |
89 ('Linux', 'x86_64'): { | |
90 'logdog_cipd_packages': { | |
91 'infra/tools/luci/logdog/butler/linux-amd64': 'testing', | |
92 'infra/tools/luci/logdog/annotee/linux-amd64': 'testing', | |
93 }, | |
94 }, | |
95 | |
96 # Mac OSX | |
97 ('Darwin',): { | |
98 'run_cmd': '/opt/infra-python/run.py', | |
99 }, | |
100 | |
101 # Windows | |
102 ('Windows',): {}, | |
103 } | |
104 | |
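To make the probe order above concrete: on a 64-bit Linux bot the runtime
merges the () entry, then ('Linux',), then ('Linux', 'x86_64'), with later
entries overriding earlier ones. A minimal sketch of that merge, mirroring
RecipeRuntime.enter below:

    import platform

    def _resolve_platform_config():
      # E.g. ('Linux', 'x86_64'): merge (), (system,), (system, processor).
      p = (platform.system(), platform.processor())
      fields = {}
      for i in xrange(len(p) + 1):
        fields.update(PLATFORM_CONFIG.get(p[:i], {}))
      return fields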
105 | |
106 class LogDogNotBootstrapped(Exception): | |
107 pass | |
108 | |
109 | |
110 class LogDogBootstrapError(Exception): | |
111 pass | |
112 | |
113 | |
114 def is_executable(path): | |
115 return os.path.isfile(path) and os.access(path, os.X_OK) | |
116 | |
117 | |
118 def ensure_directory(*path): | |
119 path = os.path.join(*path) | |
120 if not os.path.isdir(path): | |
121 os.makedirs(path) | |
122 return path | |
123 | |
124 | |
125 def _run_command(cmd, **kwargs): | |
126 dry_run = kwargs.pop('dry_run', False) | |
127 | |
128 LOGGER.debug('Executing command: %s', cmd) | |
129 if dry_run: | |
130 LOGGER.info('(Dry Run) Not executing command.') | |
131 return 0, '' | |
132 proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, **kwargs) | |
133 stdout, _ = proc.communicate() | |
134 | |
135 LOGGER.debug('Process [%s] returned [%d] with output:\n%s', | |
136 cmd, proc.returncode, stdout) | |
137 return proc.returncode, stdout | |
138 | |
139 | |
140 def _check_command(*args, **kwargs): | |
141 rv, stdout = _run_command(args, **kwargs) | |
142 if rv != 0: | |
143 raise ValueError('Process exited with non-zero return code (%d)' % (rv,)) | |
144 return stdout | |
145 | |
146 | |
147 class RecipeRuntime(object): | |
148 """RecipeRuntime is the platform-specific runtime enviornment.""" | |
149 | |
150 _SENTINEL = object() | |
151 | |
152 def __init__(self, **kwargs): | |
153 self._fields = kwargs | |
154 | |
155 @classmethod | |
156 @contextlib.contextmanager | |
157 def enter(cls, leak, **kw): | |
iannucci
2015/12/01 02:38:01
what is leak? docstring?
dnj
2015/12/01 03:36:04
Done.
| |
158 # Build our platform fields. | |
159 p = (platform.system(), platform.processor()) | |
160 fields = {} | |
161 for i in xrange(len(p)+1): | |
162 fields.update(PLATFORM_CONFIG.get(p[:i], {})) | |
163 fields.update(kw) | |
iannucci
2015/12/01 02:38:01
why not put this stuff in __init__? why is there `
dnj
2015/12/01 03:36:04
In general, I don't like __init__ doing actual wor
| |
164 | |
165 basedir = ensure_directory(os.getcwd(), '.recipe_runtime') | |
48 try: | 166 try: |
49 os.remove(name) | 167 tdir = tempfile.mkdtemp(dir=basedir) |
50 except OSError as e: | 168 LOGGER.debug('Using temporary directory [%s].', tdir) |
51 print >> sys.stderr, "LEAK: %s: %s" % (name, e) | 169 |
170 fields['workdir'] = tdir | |
171 yield cls(**fields) | |
172 finally: | |
173 if basedir and os.path.isdir(basedir): | |
174 if not leak: | |
175 LOGGER.debug('Cleaning up temporary directory [%s].', basedir) | |
176 try: | |
177 # TODO(pgervais): use infra_libs.rmtree instead. | |
178 shutil.rmtree(basedir) | |
179 except Exception: | |
180 LOGGER.exception('Failed to clean up temporary directory [%s].', | |
181 basedir) | |
182 else: | |
183 LOGGER.warning('(--leak) Leaking temporary directory [%s].', basedir) | |
184 | |
185 def __getattr__(self, key): | |
iannucci
2015/12/01 02:38:01
this seems unnecessarily fancy... why not just use
dnj
2015/12/01 03:36:04
I dunno, I think this usage looks way better.
| |
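The trade-off under discussion, in miniature (illustrative only): the
__getattr__ hook lets required fields read as plain attributes, while optional
fields still go through get() with a default.

    run_cmd = rt.run_cmd                    # raises if the field is absent
    run_cmd = rt.get('run_cmd')             # hook-free alternative, None if absent
    paths = rt.get('credential_paths', ())  # optional field with a default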
186 # Class methods/variables. | |
187 value = getattr(super(RecipeRuntime, self), key, self._SENTINEL) | |
188 if value is not self._SENTINEL: | |
189 return value | |
190 | |
191 value = self.get(key, self._SENTINEL) | |
192 if value is not self._SENTINEL: | |
193 return value | |
194 raise AttributeError(key) | |
195 | |
196 def get(self, key, default=None): | |
197 value = self._fields.get(key, self._SENTINEL) | |
198 if value is not self._SENTINEL: | |
199 return value | |
200 return default | |
201 | |
202 def __str__(self): | |
203 return str(self._fields) | |
204 | |
205 | |
206 class CIPD(object): | |
iannucci
2015/12/01 02:38:01
separate file?
dnj
2015/12/01 03:36:04
I wasn't intending to make this a first-class libr
| |
207 _CIPD_NAME = 'cipd' | |
208 | |
209 def __init__(self, path, root): | |
210 self._cipd_path = path | |
211 self._root = root | |
212 | |
213 @classmethod | |
214 def find(cls, rt, rootdir): | |
215 for p in itertools.chain( | |
216 iter(os.environ.get('PATH', '').split(os.pathsep)), | |
217 rt.get('cipd_static_paths', ())): | |
218 candidate = os.path.join(p, cls._CIPD_NAME) | |
219 if is_executable(candidate): | |
220 return cls(candidate, rootdir) | |
221 return None | |
222 | |
223 def __call__(self, *args): | |
224 cmd = [self._cipd_path] | |
225 cmd.extend(args) | |
226 _check_command(*cmd) | |
227 | |
228 def path(self, *components): | |
229 return os.path.join(self._root, *components) | |
230 | |
231 def ensure(self, **packages): | |
232 if len(packages) == 0: | |
233 return | |
234 | |
235 # Emit package list. | |
236 package_list = self.path('package_list.txt') | |
237 lines = [ | |
238 '# Automatically generated CIPD package list (launcher.py)', | |
239 '# Generated at: %s' % (datetime.datetime.now().isoformat(),), | |
240 '', | |
241 ] | |
242 for pkg, version in sorted(packages.iteritems()): | |
243 lines.append('%s %s' % (pkg, version)) | |
244 | |
245 ensure_directory(self._root) | |
246 with open(package_list, 'w+') as fd: | |
247 fd.write('\n'.join(lines)) | |
248 | |
249 # Ensure against the package list. | |
250 args = [ | |
251 'ensure', | |
252 '-root', self._root, | |
253 '-list', package_list, | |
254 ] | |
255 self(*args) | |
256 | |
257 | |
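For reference, the list file that ensure() writes and then feeds to
`cipd ensure -root <root> -list <file>` holds one `<package> <version>` pair
per line; a hypothetical rendering for the Linux packages configured above
(the timestamp is made up):

    # Automatically generated CIPD package list (launcher.py)
    # Generated at: 2015-12-01T00:00:00

    infra/tools/luci/logdog/annotee/linux-amd64 testing
    infra/tools/luci/logdog/butler/linux-amd64 testing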
258 def _get_service_account_json(rt, opts): | |
259 """Returns (str/None): If specified, the path to the service account JSON. | |
260 | |
261 This method probes the local environment for service account credentials | |
262 to use for Butler authentication. | |
263 | |
264 If we're running on a GCE instance, None is returned, since the GCE | |
265 service account is implicitly authenticated. If we're running on bare | |
266 metal, the path to on-disk credentials is returned. | |
267 | |
268 Args: | |
269 rt (RecipeRuntime): The runtime environment. | |
270 Raises: | |
271 |LogDogBootstrapError| if no credentials could be found. | |
272 """ | |
273 path = opts.logdog_service_account_json | |
274 if path: | |
275 return path | |
276 | |
277 if GceAuthenticator.is_gce(): | |
278 LOGGER.info('Running on GCE. No credentials necessary.') | |
279 return None | |
280 | |
281 for credential_path in rt.get('credential_paths', ()): | |
282 candidate = os.path.join(credential_path, 'logdog_service_account.json') | |
283 if os.path.isfile(candidate): | |
284 return candidate | |
285 | |
286 raise LogDogBootstrapError('Could not find service account credentials.') | |
287 | |
288 | |
289 def _logdog_bootstrap(rt, opts, cmd): | |
iannucci
2015/12/01 02:38:01
need docstrings for these suckers
dnj
2015/12/01 03:36:04
Done.
| |
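For orientation, the wrapped invocation this function assembles looks roughly
like the following; the paths and stream-server value are illustrative, while
the flags are the ones built in the code below:

    [butler, '-log_level=info',
     '-output', 'gcps,project="luci-logdog",topic="chrome-infra-beta"',
     'run', '-streamserver-uri', '<workdir>/butler.sock',
     '--',
     # Annotee wraps the actual recipe command, read back from JSON.
     annotee, '-log_level=info',
     '-json-args-path', '<bootstrap_dir>/annotee_cmd.json']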
290 bootstrap_dir = ensure_directory(rt.workdir, 'logdog_bootstrap') | |
291 butler, annotee = opts.logdog_butler_path, opts.logdog_annotee_path | |
292 if not (butler and annotee): | |
293 # Load packages via CIPD. | |
294 cipd = CIPD.find(rt, os.path.join(bootstrap_dir, 'cipd_root')) | |
295 if rt.get('logdog_cipd_packages'): | |
296 if not cipd: | |
297 raise LogDogBootstrapError('Could not find CIPD binary.') | |
298 cipd.ensure(**rt.logdog_cipd_packages) | |
299 if not butler: | |
300 butler = cipd.path('logdog_butler') | |
301 if not annotee: | |
302 annotee = cipd.path('logdog_annotee') | |
303 | |
304 if not is_executable(annotee): | |
305 raise LogDogNotBootstrapped('Annotee is not executable: %s' % (annotee,)) | |
306 if not is_executable(butler): | |
307 raise LogDogNotBootstrapped('Butler is not executable: %s' % (butler,)) | |
308 | |
309 # Determine LogDog verbosity. | |
310 logdog_verbose = [] | |
311 if opts.logdog_verbose == 0: | |
312 pass | |
313 elif opts.logdog_verbose == 1: | |
314 logdog_verbose.extend('-log_level=info') | |
315 else: | |
316 logdog_verbose.extend('-log_level=debug') | |
317 | |
318 service_account_args = [] | |
319 service_account_json = _get_service_account_json(rt, opts) | |
320 if service_account_json: | |
321 service_account_args += ['-service-account-json', service_account_json] | |
322 | |
323 streamserver_uri_gen = rt.get('logdog_butler_streamserver_gen') | |
324 if not streamserver_uri_gen: | |
325 raise LogDogBootstrapError('No streamserver URI generator.') | |
326 streamserver_uri = streamserver_uri_gen(rt) | |
327 | |
328 # Dump Annotee command to JSON. | |
329 cmd_json = os.path.join(bootstrap_dir, 'annotee_cmd.json') | |
330 with open(cmd_json, 'w') as fd: | |
331 json.dump(cmd, fd) | |
332 | |
333 cmd = [ | |
334 # Butler Command. | |
335 butler, | |
336 ] + logdog_verbose + service_account_args + [ | |
337 '-output', 'gcps,project="%s",topic="%s"' % (rt.logdog_pubsub.project, | |
338 rt.logdog_pubsub.topic), | |
339 'run', | |
340 '-streamserver-uri', streamserver_uri, | |
341 '--', | |
342 | |
343 # Annotee Command. | |
344 annotee, | |
345 ] + logdog_verbose + [ | |
346 '-json-args-path', cmd_json, | |
347 ] | |
348 rv, _ = _run_command(cmd, dry_run=opts.dry_run) | |
349 if rv in LOGDOG_ERROR_RETURNCODES: | |
350 raise LogDogBootstrapError('LogDog Error (%d)' % (rv,)) | |
351 return rv | |
352 | |
353 | |
354 def _assert_logdog_whitelisted(rt): | |
355 """Asserts that the runtime environment is whitelisted for LogDog bootstrap. | |
356 | |
357 Args: | |
358 rt (RecipeRuntime): The runtime to test. | |
359 Raises: | |
360 LogDogNotBootstrapped: if the runtime is not whitelisted. | |
361 """ | |
362 mastername, buildername = rt.get('mastername'), rt.get('buildername') | |
363 if not all((mastername, buildername)): | |
364 raise LogDogNotBootstrapped('Required mastername/buildername is not set.') | |
365 | |
366 # Key on mastername. | |
367 bdict = LOGDOG_WHITELIST_MASTER_BUILDERS.get(mastername) | |
368 if bdict is not None: | |
369 # Key on buildername. | |
370 if WHITELIST_ALL in bdict or buildername in bdict: | |
371 LOGGER.info('Whitelisted master %s, builder %s.', | |
372 mastername, buildername) | |
373 return | |
374 raise LogDogNotBootstrapped('Master %s, builder %s is not whitelisted.' % ( | |
375 mastername, buildername)) | |
52 | 376 |
53 | 377 |
54 def get_recipe_properties(build_properties, use_factory_properties_from_disk): | 378 def get_recipe_properties(build_properties, use_factory_properties_from_disk): |
55 """Constructs the recipe's properties from buildbot's properties. | 379 """Constructs the recipe's properties from buildbot's properties. |
56 | 380 |
57 This retrieves the current factory properties from the master_config | 381 This retrieves the current factory properties from the master_config |
58 in the slave's checkout (no factory properties are handed to us from the | 382 in the slave's checkout (no factory properties are handed to us from the |
59 master), and merges in the build properties. | 383 master), and merges in the build properties. |
60 | 384 |
61 Using the values from the checkout allows us to do things like change | 385 Using the values from the checkout allows us to do things like change |
(...skipping 44 matching lines...) | |
106 for name, value in factory_properties.items(): | 430 for name, value in factory_properties.items(): |
107 if name not in build_properties: | 431 if name not in build_properties: |
108 s.set_build_property(name, json.dumps(value)) | 432 s.set_build_property(name, json.dumps(value)) |
109 | 433 |
110 # Build properties override factory properties. | 434 # Build properties override factory properties. |
111 properties = factory_properties.copy() | 435 properties = factory_properties.copy() |
112 properties.update(build_properties) | 436 properties.update(build_properties) |
113 return properties | 437 return properties |
114 | 438 |
115 | 439 |
116 def get_factory_properties_from_disk(mastername, buildername): | 440 def get_factory_properties_from_disk(rt): |
117 master_list = master_cfg_utils.GetMasters() | 441 master_list = master_cfg_utils.GetMasters() |
118 master_path = None | 442 master_path = None |
119 for name, path in master_list: | 443 for name, path in master_list: |
120 if name == mastername: | 444 if name == rt.mastername: |
121 master_path = path | 445 master_path = path |
122 | 446 |
123 if not master_path: | 447 if not master_path: |
124 raise LookupError('master "%s" not found.' % mastername) | 448 raise LookupError('master "%s" not found.' % rt.mastername) |
125 | 449 |
126 script_path = os.path.join(BUILD_ROOT, 'scripts', 'tools', | 450 script_path = os.path.join(BUILD_ROOT, 'scripts', 'tools', |
127 'dump_master_cfg.py') | 451 'dump_master_cfg.py') |
128 | 452 |
129 with namedTempFile() as fname: | 453 master_json = os.path.join(rt.workdir, 'dump_master_cfg.json') |
130 dump_cmd = [sys.executable, | 454 dump_cmd = [sys.executable, |
131 script_path, | 455 script_path, |
132 master_path, fname] | 456 master_path, master_json] |
133 proc = subprocess.Popen(dump_cmd, cwd=BUILD_ROOT, stdout=subprocess.PIPE, | 457 proc = subprocess.Popen(dump_cmd, cwd=BUILD_ROOT, stdout=subprocess.PIPE, |
134 stderr=subprocess.PIPE) | 458 stderr=subprocess.PIPE) |
135 out, err = proc.communicate() | 459 out, err = proc.communicate() |
136 exit_code = proc.returncode | 460 if proc.returncode: |
461 raise LookupError('Failed to get the master config; dump_master_cfg %s ' | |
462 'returned %d:\n%s\n%s\n' % ( | |
463 rt.mastername, proc.returncode, out, err)) | |
137 | 464 |
138 if exit_code: | 465 with open(master_json, 'rU') as f: |
139 raise LookupError('Failed to get the master config; dump_master_cfg %s' | 466 config = json.load(f) |
140 'returned %d):\n%s\n%s\n'% ( | |
141 mastername, exit_code, out, err)) | |
142 | |
143 with open(fname, 'rU') as f: | |
144 config = json.load(f) | |
145 | 467 |
146 # Now extract just the factory properties for the requested builder | 468 # Now extract just the factory properties for the requested builder |
147 # from the master config. | 469 # from the master config. |
148 props = {} | 470 props = {} |
149 found = False | 471 found = False |
150 for builder_dict in config['builders']: | 472 for builder_dict in config['builders']: |
151 if builder_dict['name'] == buildername: | 473 if builder_dict['name'] == rt.buildername: |
152 found = True | 474 found = True |
153 factory_properties = builder_dict['factory']['properties'] | 475 factory_properties = builder_dict['factory']['properties'] |
154 for name, (value, _) in factory_properties.items(): | 476 for name, (value, _) in factory_properties.items(): |
155 props[name] = value | 477 props[name] = value |
156 | 478 |
157 if not found: | 479 if not found: |
158 raise LookupError('builder "%s" not found in master "%s"' % | 480 raise LookupError('builder "%s" not found in master "%s"' % |
159 (buildername, mastername)) | 481 (rt.buildername, rt.mastername)) |
160 | 482 |
161 if 'recipe' not in props: | 483 if 'recipe' not in props: |
162 raise LookupError('Cannot find recipe for %s on %s' % | 484 raise LookupError('Cannot find recipe for %s on %s' % |
163 (buildername, mastername)) | 485 (rt.buildername, rt.mastername)) |
164 | 486 |
165 return props | 487 return props |
166 | 488 |
167 | 489 |
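For reference, a hypothetical fragment of the dump_master_cfg.py JSON this
function walks; each factory property arrives as a (value, source) pair,
matching the tuple unpacking above (builder name and source string are made
up):

    {
      'builders': [
        {
          'name': 'Linux Builder',
          'factory': {
            'properties': {
              'recipe': ['chromium', 'BuildFactory'],  # (value, source)
            },
          },
        },
      ],
    }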
168 def get_args(argv): | 490 def get_args(argv): |
169 """Process command-line arguments.""" | 491 """Process command-line arguments.""" |
492 parser = argparse.ArgumentParser( | |
iannucci
2015/12/01 02:38:01
separate CL please
dnj
2015/12/01 03:36:04
mmk
| |
493 description='Entry point for annotated builds.') | |
494 parser.add_argument('-v', '--verbose', | |
495 action='count', default=0, | |
496 help='Increase verbosity. This can be specified multiple times.') | |
497 parser.add_argument('-d', '--dry-run', action='store_true', | |
498 help='Perform the setup, but refrain from executing the recipe.') | |
499 parser.add_argument('-l', '--leak', action='store_true', | |
500 help="Refrain from cleaning up generated artifacts.") | |
501 parser.add_argument('--build-properties', | |
502 type=json.loads, default={}, | |
503 help='build properties in JSON format') | |
504 parser.add_argument('--factory-properties', | |
505 type=json.loads, default={}, | |
506 help='factory properties in JSON format') | |
507 parser.add_argument('--build-properties-gz', dest='build_properties', | |
508 type=chromium_utils.convert_gz_json_type, default={}, | |
509 help='build properties in b64 gz JSON format') | |
510 parser.add_argument('--factory-properties-gz', dest='factory_properties', | |
511 type=chromium_utils.convert_gz_json_type, default={}, | |
512 help='factory properties in b64 gz JSON format') | |
513 parser.add_argument('--keep-stdin', action='store_true', default=False, | |
514 help='don\'t close stdin when running recipe steps') | |
515 parser.add_argument('--master-overrides-slave', action='store_true', | |
516 help='use the property values given on the command line from the master, ' | |
517 'not the ones looked up on the slave') | |
518 parser.add_argument('--use-factory-properties-from-disk', | |
519 action='store_true', default=False, | |
520 help='use factory properties loaded from disk on the slave') | |
170 | 521 |
171 parser = optparse.OptionParser( | 522 group = parser.add_argument_group('LogDog Bootstrap') |
172 description='Entry point for annotated builds.') | 523 group.add_argument('-V', '--logdog-verbose', |
173 parser.add_option('--build-properties', | 524 action='count', default=0, |
174 action='callback', callback=chromium_utils.convert_json, | 525 help='Increase LogDog verbosity. This can be specified multiple times.') |
175 type='string', default={}, | 526 group.add_argument('-f', '--logdog-force', action='store_true', |
176 help='build properties in JSON format') | 527 help='Force LogDog bootstrapping, even if the system is not configured.') |
177 parser.add_option('--factory-properties', | 528 group.add_argument('--logdog-butler-path', |
178 action='callback', callback=chromium_utils.convert_json, | 529 help='Path to the LogDog Butler. If empty, one will be probed/downloaded ' |
179 type='string', default={}, | 530 'from CIPD.') |
180 help='factory properties in JSON format') | 531 group.add_argument('--logdog-annotee-path', |
181 parser.add_option('--build-properties-gz', | 532 help='Path to the LogDog Annotee. If empty, one will be ' |
182 action='callback', callback=chromium_utils.convert_gz_json, | 533 'probed/downloaded from CIPD.') |
183 type='string', default={}, dest='build_properties', | 534 group.add_argument('--logdog-service-account-json', |
184 help='build properties in b64 gz JSON format') | 535 help='Path to the service account JSON. If one is not provided, the ' |
185 parser.add_option('--factory-properties-gz', | 536 'local system credentials will be used.') |
186 action='callback', callback=chromium_utils.convert_gz_json, | |
187 type='string', default={}, dest='factory_properties', | |
188 help='factory properties in b64 gz JSON format') | |
189 parser.add_option('--keep-stdin', action='store_true', default=False, | |
190 help='don\'t close stdin when running recipe steps') | |
191 parser.add_option('--master-overrides-slave', action='store_true', | |
192 help='use the property values given on the command line ' | |
193 'from the master, not the ones looked up on the slave') | |
194 parser.add_option('--use-factory-properties-from-disk', | |
195 action='store_true', default=False, | |
196 help='use factory properties loaded from disk on the slave') | |
197 return parser.parse_args(argv) | 537 return parser.parse_args(argv) |
198 | 538 |
199 | 539 |
200 def update_scripts(): | 540 def update_scripts(): |
201 if os.environ.get('RUN_SLAVE_UPDATED_SCRIPTS'): | 541 if os.environ.pop('RUN_SLAVE_UPDATED_SCRIPTS', None) is None: |
iannucci
2015/12/01 02:38:01
technically this is a semantic change: before it w
dnj
2015/12/01 03:36:04
Good point, I'll go ahead and revert this.
| |
202 os.environ.pop('RUN_SLAVE_UPDATED_SCRIPTS') | |
203 return False | 542 return False |
204 | 543 |
205 stream = annotator.StructuredAnnotationStream() | 544 stream = annotator.StructuredAnnotationStream() |
206 | 545 |
207 with stream.step('update_scripts') as s: | 546 with stream.step('update_scripts') as s: |
208 gclient_name = 'gclient' | 547 gclient_name = 'gclient' |
209 if sys.platform.startswith('win'): | 548 if sys.platform.startswith('win'): |
210 gclient_name += '.bat' | 549 gclient_name += '.bat' |
211 gclient_path = os.path.join(BUILD_ROOT, '..', 'depot_tools', gclient_name) | 550 gclient_path = os.path.join(BUILD_ROOT, '..', 'depot_tools', gclient_name) |
212 gclient_cmd = [gclient_path, 'sync', '--force', '--verbose'] | 551 gclient_cmd = [gclient_path, 'sync', '--force', '--verbose'] |
213 try: | 552 try: |
214 fd, output_json = tempfile.mkstemp() | 553 fd, output_json = tempfile.mkstemp() |
215 os.close(fd) | 554 os.close(fd) |
216 gclient_cmd += ['--output-json', output_json] | 555 gclient_cmd += ['--output-json', output_json] |
217 except Exception: | 556 except Exception: |
218 # Super paranoia try block. | 557 # Super paranoia try block. |
219 output_json = None | 558 output_json = None |
220 cmd_dict = { | 559 cmd_dict = { |
221 'name': 'update_scripts', | 560 'name': 'update_scripts', |
222 'cmd': gclient_cmd, | 561 'cmd': gclient_cmd, |
223 'cwd': BUILD_ROOT, | 562 'cwd': BUILD_ROOT, |
224 } | 563 } |
225 annotator.print_step(cmd_dict, os.environ, stream) | 564 annotator.print_step(cmd_dict, os.environ, stream) |
226 if subprocess.call(gclient_cmd, cwd=BUILD_ROOT) != 0: | 565 rv, _ = _run_command(gclient_cmd, cwd=BUILD_ROOT) |
566 if rv != 0: | |
227 s.step_text('gclient sync failed!') | 567 s.step_text('gclient sync failed!') |
228 s.step_warnings() | 568 s.step_warnings() |
229 elif output_json: | 569 elif output_json: |
230 try: | 570 try: |
231 with open(output_json, 'r') as f: | 571 with open(output_json, 'r') as f: |
232 gclient_json = json.load(f) | 572 gclient_json = json.load(f) |
233 for line in json.dumps( | 573 for line in json.dumps( |
234 gclient_json, sort_keys=True, | 574 gclient_json, sort_keys=True, |
235 indent=4, separators=(',', ': ')).splitlines(): | 575 indent=4, separators=(',', ': ')).splitlines(): |
236 s.step_log_line('gclient_json', line) | 576 s.step_log_line('gclient_json', line) |
237 s.step_log_end('gclient_json') | 577 s.step_log_end('gclient_json') |
238 revision = gclient_json['solutions']['build/']['revision'] | 578 revision = gclient_json['solutions']['build/']['revision'] |
239 scm = gclient_json['solutions']['build/']['scm'] | 579 scm = gclient_json['solutions']['build/']['scm'] |
240 s.step_text('%s - %s' % (scm, revision)) | 580 s.step_text('%s - %s' % (scm, revision)) |
241 s.set_build_property('build_scm', json.dumps(scm)) | 581 s.set_build_property('build_scm', json.dumps(scm)) |
242 s.set_build_property('build_revision', json.dumps(revision)) | 582 s.set_build_property('build_revision', json.dumps(revision)) |
243 except Exception as e: | 583 except Exception as e: |
244 s.step_text('Unable to process gclient JSON %s' % repr(e)) | 584 s.step_text('Unable to process gclient JSON %s' % repr(e)) |
245 s.step_warnings() | 585 s.step_warnings() |
246 finally: | 586 finally: |
247 try: | 587 try: |
248 os.remove(output_json) | 588 os.remove(output_json) |
249 except Exception as e: | 589 except Exception as e: |
250 print >> sys.stderr, "LEAKED:", output_json, e | 590 LOGGER.warning("LEAKED: %s", output_json, exc_info=True) |
251 else: | 591 else: |
252 s.step_text('Unable to get SCM data') | 592 s.step_text('Unable to get SCM data') |
253 s.step_warnings() | 593 s.step_warnings() |
254 | 594 |
255 os.environ['RUN_SLAVE_UPDATED_SCRIPTS'] = '1' | 595 os.environ['RUN_SLAVE_UPDATED_SCRIPTS'] = '1' |
256 | 596 |
257 # After running update scripts, set PYTHONIOENCODING=UTF-8 for the real | 597 # After running update scripts, set PYTHONIOENCODING=UTF-8 for the real |
258 # annotated_run. | 598 # annotated_run. |
259 os.environ['PYTHONIOENCODING'] = 'UTF-8' | 599 os.environ['PYTHONIOENCODING'] = 'UTF-8' |
260 | 600 |
261 return True | 601 return True |
262 | 602 |
263 | 603 |
264 def clean_old_recipe_engine(): | 604 def clean_old_recipe_engine(): |
265 """Clean stale pycs from the old location of recipe_engine. | 605 """Clean stale pycs from the old location of recipe_engine. |
266 | 606 |
267 This function should only be needed for a little while after the recipe | 607 This function should only be needed for a little while after the recipe |
268 packages rollout (2015-09-16). | 608 packages rollout (2015-09-16). |
269 """ | 609 """ |
270 for (dirpath, _, filenames) in os.walk( | 610 for (dirpath, _, filenames) in os.walk( |
271 os.path.join(BUILD_ROOT, 'third_party', 'recipe_engine')): | 611 os.path.join(BUILD_ROOT, 'third_party', 'recipe_engine')): |
272 for filename in filenames: | 612 for filename in filenames: |
273 if filename.endswith('.pyc'): | 613 if filename.endswith('.pyc'): |
274 path = os.path.join(dirpath, filename) | 614 os.remove(os.path.join(dirpath, filename)) |
275 os.remove(path) | |
276 | 615 |
277 | 616 |
278 @contextlib.contextmanager | 617 def write_monitoring_event(rt, outdir): |
279 def build_data_directory(): | 618 if not (rt.run_cmd and os.path.exists(rt.run_cmd)): |
280 """Context manager that creates a build-specific directory. | 619 LOGGER.warning('Unable to find run.py at %s, no events will be sent.', |
620 rt.run_cmd) | |
621 return | |
281 | 622 |
282 The directory is wiped when exiting. | 623 hostname = socket.getfqdn() |
624 if hostname: # just in case getfqdn() returns None. | |
625 hostname = hostname.split('.')[0] | |
626 else: | |
627 hostname = None | |
283 | 628 |
284 Yields: | 629 try: |
285 build_data (str or None): full path to a writeable directory. Return None if | 630 cmd = [rt.run_cmd, 'infra.tools.send_monitoring_event', |
286 no directory can be found or if it's not writeable. | 631 '--event-mon-output-file', |
287 """ | 632 os.path.join(outdir, 'log_request_proto'), |
288 prefix = 'build_data' | 633 '--event-mon-run-type', 'file', |
289 | 634 '--event-mon-service-name', |
290 # TODO(pgervais): import that from infra_libs.logs instead | 635 'buildbot/master/master.%s' |
291 if sys.platform.startswith('win'): # pragma: no cover | 636 % rt.get('mastername', 'UNKNOWN'), |
292 DEFAULT_LOG_DIRECTORIES = [ | 637 '--build-event-build-name', |
293 'E:\\chrome-infra-logs', | 638 rt.get('buildername', 'UNKNOWN'), |
294 'C:\\chrome-infra-logs', | 639 '--build-event-build-number', |
295 ] | 640 str(rt.get('buildnumber', 0)), |
296 else: | 641 '--build-event-build-scheduling-time', |
297 DEFAULT_LOG_DIRECTORIES = ['/var/log/chrome-infra'] | 642 str(1000*int(rt.get('requestedAt', 0))), |
298 | 643 '--build-event-type', 'BUILD', |
299 build_data_dir = None | 644 '--event-mon-timestamp-kind', 'POINT', |
300 for candidate in DEFAULT_LOG_DIRECTORIES: | 645 # And use only defaults for credentials. |
301 if os.path.isdir(candidate): | 646 ] |
302 build_data_dir = os.path.join(candidate, prefix) | 647 # Add this conditionally so that we get an error in |
303 break | 648 # send_monitoring_event log files in case it isn't present. |
304 | 649 if hostname: |
305 # Remove any leftovers and recreate the dir. | 650 cmd += ['--build-event-hostname', hostname] |
306 if build_data_dir: | 651 _check_command(cmd) |
307 print >> sys.stderr, "Creating directory" | 652 except Exception: |
308 # TODO(pgervais): use infra_libs.rmtree instead. | 653 LOGGER.warning("Failed to send monitoring event.", exc_info=True) |
309 if os.path.exists(build_data_dir): | |
310 try: | |
311 shutil.rmtree(build_data_dir) | |
312 except Exception as exc: | |
313 # Catching everything: we don't want to break any builds for that reason | |
314 print >> sys.stderr, ( | |
315 "FAILURE: path can't be deleted: %s.\n%s" % (build_data_dir, str(exc)) | |
316 ) | |
317 print >> sys.stderr, "Creating directory" | |
318 | |
319 if not os.path.exists(build_data_dir): | |
320 try: | |
321 os.mkdir(build_data_dir) | |
322 except Exception as exc: | |
323 print >> sys.stderr, ( | |
324 "FAILURE: directory can't be created: %s.\n%s" % | |
325 (build_data_dir, str(exc)) | |
326 ) | |
327 build_data_dir = None | |
328 | |
329 # Under this line build_data_dir should point to an existing empty dir | |
330 # or be None. | |
331 yield build_data_dir | |
332 | |
333 # Clean up after ourselves | |
334 if build_data_dir: | |
335 # TODO(pgervais): use infra_libs.rmtree instead. | |
336 try: | |
337 shutil.rmtree(build_data_dir) | |
338 except Exception as exc: | |
339 # Catching everything: we don't want to break any builds for that reason. | |
340 print >> sys.stderr, ( | |
341 "FAILURE: path can't be deleted: %s.\n%s" % (build_data_dir, str(exc)) | |
342 ) | |
343 | 654 |
344 | 655 |
345 def main(argv): | 656 def main(argv): |
346 opts, _ = get_args(argv) | 657 opts = get_args(argv) |
658 if opts.verbose == 0: | |
659 level = logging.INFO | |
660 else: | |
661 level = logging.DEBUG | |
662 logging.getLogger().setLevel(level) | |
663 | |
347 # TODO(crbug.com/551165): remove flag "factory_properties". | 664 # TODO(crbug.com/551165): remove flag "factory_properties". |
348 use_factory_properties_from_disk = (opts.use_factory_properties_from_disk or | 665 use_factory_properties_from_disk = (opts.use_factory_properties_from_disk or |
349 bool(opts.factory_properties)) | 666 bool(opts.factory_properties)) |
350 properties = get_recipe_properties( | 667 properties = get_recipe_properties( |
351 opts.build_properties, use_factory_properties_from_disk) | 668 opts.build_properties, use_factory_properties_from_disk) |
352 | 669 |
353 clean_old_recipe_engine() | 670 clean_old_recipe_engine() |
354 | 671 |
355 # Find out if the recipe we intend to run is in build_internal's recipes. If | 672 # Enter our runtime environment. |
356 # so, use recipes.py from there, otherwise use the one from build. | 673 with RecipeRuntime.enter(opts.leak, **properties) as rt: |
357 recipe_file = properties['recipe'].replace('/', os.path.sep) + '.py' | 674 LOGGER.debug('Loaded runtime: %s', rt) |
358 if os.path.exists(os.path.join(BUILD_LIMITED_ROOT, 'recipes', recipe_file)): | |
359 recipe_runner = os.path.join(BUILD_LIMITED_ROOT, 'recipes.py') | |
360 else: | |
361 recipe_runner = os.path.join(SCRIPT_PATH, 'recipes.py') | |
362 | 675 |
363 with build_data_directory() as build_data_dir: | 676 # Find out if the recipe we intend to run is in build_internal's recipes. If |
364 # Create a LogRequestLite proto containing this build's information. | 677 # so, use recipes.py from there, otherwise use the one from build. |
365 if build_data_dir: | 678 recipe_file = properties['recipe'].replace('/', os.path.sep) + '.py' |
366 properties['build_data_dir'] = build_data_dir | 679 if os.path.exists(os.path.join(BUILD_LIMITED_ROOT, 'recipes', recipe_file)): |
680 recipe_runner = os.path.join(BUILD_LIMITED_ROOT, 'recipes.py') | |
681 else: | |
682 recipe_runner = os.path.join(SCRIPT_PATH, 'recipes.py') | |
367 | 683 |
368 hostname = socket.getfqdn() | 684 # Setup monitoring directory and send a monitoring event. |
369 if hostname: # just in case getfqdn() returns None. | 685 build_data_dir = ensure_directory(rt.workdir, 'build_data') |
370 hostname = hostname.split('.')[0] | 686 properties['build_data_dir'] = build_data_dir |
371 else: | |
372 hostname = None | |
373 | 687 |
374 if RUN_CMD and os.path.exists(RUN_CMD): | 688 # Write our annotated_run.py monitoring event. |
375 try: | 689 write_monitoring_event(rt, build_data_dir) |
376 cmd = [RUN_CMD, 'infra.tools.send_monitoring_event', | |
377 '--event-mon-output-file', | |
378 os.path.join(build_data_dir, 'log_request_proto'), | |
379 '--event-mon-run-type', 'file', | |
380 '--event-mon-service-name', | |
381 'buildbot/master/master.%s' | |
382 % properties.get('mastername', 'UNKNOWN'), | |
383 '--build-event-build-name', | |
384 properties.get('buildername', 'UNKNOWN'), | |
385 '--build-event-build-number', | |
386 str(properties.get('buildnumber', 0)), | |
387 '--build-event-build-scheduling-time', | |
388 str(1000*int(properties.get('requestedAt', 0))), | |
389 '--build-event-type', 'BUILD', | |
390 '--event-mon-timestamp-kind', 'POINT', | |
391 # And use only defaults for credentials. | |
392 ] | |
393 # Add this conditionally so that we get an error in | |
394 # send_monitoring_event log files in case it isn't present. | |
395 if hostname: | |
396 cmd += ['--build-event-hostname', hostname] | |
397 subprocess.call(cmd) | |
398 except Exception: | |
399 print >> sys.stderr, traceback.format_exc() | |
400 | 690 |
401 else: | 691 # Dump properties to JSON and build recipe command. |
402 print >> sys.stderr, ( | 692 props_file = os.path.join(rt.workdir, 'recipe_properties.json') |
403 'WARNING: Unable to find run.py at %r, no events will be sent.' | 693 with open(props_file, 'w') as fh: |
404 % str(RUN_CMD) | 694 json.dump(properties, fh) |
405 ) | 695 cmd = [ |
696 sys.executable, '-u', recipe_runner, | |
697 'run', | |
698 '--workdir=%s' % os.getcwd(), | |
699 '--properties-file=%s' % props_file, | |
700 rt.recipe, | |
701 ] | |
406 | 702 |
407 with namedTempFile() as props_file: | 703 status = None |
408 with open(props_file, 'w') as fh: | 704 try: |
409 fh.write(json.dumps(properties)) | 705 if not opts.logdog_force: |
410 cmd = [ | 706 _assert_logdog_whitelisted(rt) |
iannucci
2015/12/01 02:38:01
why not just have this return a status code? I don
dnj
2015/12/01 03:36:04
I chose exceptions here because I'm already surrou
| |
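A minimal sketch of the status-code alternative being discussed, with a
hypothetical helper that returns None rather than raising when bootstrapping
is unavailable (not what this CL does):

    def _maybe_logdog_bootstrap(rt, opts, cmd):
      """Returns the bootstrapped return code, or None if not bootstrapped."""
      try:
        if not opts.logdog_force:
          _assert_logdog_whitelisted(rt)
        return _logdog_bootstrap(rt, opts, cmd)
      except (LogDogNotBootstrapped, LogDogBootstrapError):
        return None

    status = _maybe_logdog_bootstrap(rt, opts, cmd)
    if status is None:
      status, _ = _run_command(cmd, dry_run=opts.dry_run)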
411 sys.executable, '-u', recipe_runner, | |
412 'run', | |
413 '--workdir=%s' % os.getcwd(), | |
414 '--properties-file=%s' % props_file, | |
415 properties['recipe'] ] | |
416 status = subprocess.call(cmd) | |
417 | 707 |
418 # TODO(pgervais): Send events from build_data_dir to the endpoint. | 708 status = _logdog_bootstrap(rt, opts, cmd) |
iannucci
2015/12/01 02:38:01
So IIUC, on windows this will now look like
annot
dnj
2015/12/01 03:36:04
It should be:
annotated_run.py
annotated_run.py
| |
709 except LogDogNotBootstrapped as e: | |
710 LOGGER.info('Not bootstrapped: %s', e.message) | |
711 except LogDogBootstrapError as e: | |
712 LOGGER.warning('Could not bootstrap LogDog: %s', e.message) | |
713 except Exception: | |
714 LOGGER.exception('Exception while bootstrapping LogDog.') | |
715 finally: | |
716 if status is None: | |
717 LOGGER.info('Not using LogDog. Invoking `annotated_run.py` directly.') | |
718 status, _ = _run_command(cmd, dry_run=opts.dry_run) | |
719 | |
720 # TODO(pgervais): Send events from build_data_dir to the endpoint. | |
419 return status | 721 return status |
420 | 722 |
723 | |
421 def shell_main(argv): | 724 def shell_main(argv): |
422 if update_scripts(): | 725 if update_scripts(): |
423 return subprocess.call([sys.executable] + argv) | 726 # Re-execute with the updated annotated_run.py. |
727 rv, _ = _run_command([sys.executable] + argv) | |
728 return rv | |
424 else: | 729 else: |
425 return main(argv) | 730 return main(argv[1:]) |
426 | 731 |
427 | 732 |
428 if __name__ == '__main__': | 733 if __name__ == '__main__': |
734 logging.basicConfig(level=logging.INFO) | |
429 sys.exit(shell_main(sys.argv)) | 735 sys.exit(shell_main(sys.argv)) |