OLD | NEW |
---|---|
1 #!/usr/bin/env python | 1 #!/usr/bin/env python |
2 # Copyright (c) 2013 The Chromium Authors. All rights reserved. | 2 # Copyright (c) 2013 The Chromium Authors. All rights reserved. |
3 # Use of this source code is governed by a BSD-style license that can be | 3 # Use of this source code is governed by a BSD-style license that can be |
4 # found in the LICENSE file. | 4 # found in the LICENSE file. |
5 | 5 |
6 import argparse | 6 import argparse |
7 import collections | |
7 import contextlib | 8 import contextlib |
9 import datetime | |
10 import hashlib | |
11 import itertools | |
8 import json | 12 import json |
13 import logging | |
9 import os | 14 import os |
15 import platform | |
10 import shutil | 16 import shutil |
11 import socket | 17 import socket |
12 import subprocess | 18 import subprocess |
13 import sys | 19 import sys |
14 import tempfile | 20 import tempfile |
15 import traceback | 21 |
16 | 22 |
23 # Install Infra build environment. | |
17 BUILD_ROOT = os.path.dirname(os.path.dirname(os.path.dirname( | 24 BUILD_ROOT = os.path.dirname(os.path.dirname(os.path.dirname( |
18 os.path.abspath(__file__)))) | 25 os.path.abspath(__file__)))) |
19 sys.path.append(os.path.join(BUILD_ROOT, 'scripts')) | 26 sys.path.insert(0, os.path.join(BUILD_ROOT, 'scripts')) |
20 sys.path.append(os.path.join(BUILD_ROOT, 'third_party')) | 27 import common.env |
28 common.env.Install() | |
21 | 29 |
22 from common import annotator | 30 from common import annotator |
23 from common import chromium_utils | 31 from common import chromium_utils |
24 from common import master_cfg_utils | 32 from common import master_cfg_utils |
25 | 33 from slave import gce |
26 SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__)) | 34 |
27 BUILD_LIMITED_ROOT = os.path.join( | 35 SCRIPT_PATH = os.path.join(common.env.Build, 'scripts', 'slave') |
28 os.path.dirname(BUILD_ROOT), 'build_internal', 'scripts', 'slave') | 36 BUILD_LIMITED_ROOT = os.path.join(common.env.BuildInternal, 'scripts', 'slave') |
29 | 37 |
30 PACKAGE_CFG = os.path.join( | 38 # Logging instance. |
31 os.path.dirname(os.path.dirname(SCRIPT_PATH)), | 39 LOGGER = logging.getLogger('annotated_run') |
32 'infra', 'config', 'recipes.cfg') | 40 |
33 | 41 # Return codes used by Butler/Annotee to indicate their failure (as opposed to |
34 if sys.platform.startswith('win'): | 42 # a forwarded return code from the underlying process). |
35 # TODO(pgervais): add windows support | 43 LOGDOG_ERROR_RETURNCODES = ( |
36 # QQ: Where is infra/run.py on windows machines? | 44 # Butler runtime error. |
37 RUN_CMD = None | 45 250, |
38 else: | 46 # Annotee runtime error. |
39 RUN_CMD = os.path.join('/', 'opt', 'infra-python', 'run.py') | 47 251, |
40 | 48 ) |
41 @contextlib.contextmanager | 49 |
42 def namedTempFile(): | 50 # Whitelist of {master}=>[{builder}|WHITELIST_ALL] whitelisting specific masters |
43 fd, name = tempfile.mkstemp() | 51 # and builders for experimental LogDog/Annotee export. |
44 os.close(fd) # let the exceptions fly | 52 LOGDOG_WHITELIST_MASTER_BUILDERS = { |
45 try: | 53 } |
46 yield name | 54 |
47 finally: | 55 # Sentinel value that, if present in master config, matches all builders |
56 # underneath that master. | |
57 WHITELIST_ALL = '*' | |
58 | |
59 # Configuration for a Pub/Sub topic. | |
60 PubSubConfig = collections.namedtuple('PubSubConfig', ('project', 'topic')) | |
61 | |
62 # RecipeRuntime will probe this for values. | |
63 # - First, (system, platform) | |
64 # - Then, (system,) | |
65 # - Finally, (), | |
66 PLATFORM_CONFIG = { | |
67 # All systems. | |
68 (): { | |
69 'logdog_pubsub': PubSubConfig( | |
70 project='luci-logdog', | |
71 topic='chrome-infra-beta', | |
72 ), | |
73 }, | |
74 | |
75 # Linux | |
76 ('Linux',): { | |
77 'run_cmd': '/opt/infra-python/run.py', | |
78 'cipd_static_paths': ( | |
79 # XXX: Get this right? | |
pgervais
2015/12/01 18:58:28
XXX == FIXME, TODO?
dnj
2015/12/01 22:39:55
XXX, want to fix this before commit.
| |
80 '/opt/chrome-infra', | |
81 ), | |
82 'credential_paths': ( | |
83 # XXX: Get this right? | |
84 '/opt/infra/service_accounts', | |
85 ), | |
86 'logdog_butler_streamserver_gen': lambda rt: os.path.join(rt.workdir, | |
87 'butler.sock'), | |
88 }, | |
89 ('Linux', 'x86_64'): { | |
90 'logdog_cipd_packages': { | |
pgervais
2015/12/01 18:58:28
Why is this handled by annotated_run.py? It seems
dnj
2015/12/01 22:39:55
Done.
| |
91 'infra/tools/luci/logdog/butler/linux-amd64': 'testing', | |
92 'infra/tools/luci/logdog/annotee/linux-amd64': 'testing', | |
93 }, | |
94 }, | |
95 | |
96 # Mac OSX | |
97 ('Darwin',): { | |
98 'run_cmd': '/opt/infra-python/run.py', | |
99 }, | |
100 | |
101 # Windows | |
102 ('Windows',): {}, | |
103 } | |
104 | |
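The probing order described above (generic entries merged first, most specific last, so specific keys win) is what `RecipeRuntime.enter` implements further down; as a minimal standalone sketch of the same merge logic, using a hypothetical helper name `resolve_platform_config`:

    import platform

    def resolve_platform_config(config):
      # Merge (), then (system,), then (system, processor); later,
      # more specific entries override earlier, generic ones.
      p = (platform.system(), platform.processor())
      attrs = {}
      for i in xrange(len(p) + 1):
        attrs.update(config.get(p[:i], {}))
      return attrs

On a 64-bit Linux slave this resolves to 'logdog_pubsub' from (), 'run_cmd' plus the path and streamserver entries from ('Linux',), and 'logdog_cipd_packages' from ('Linux', 'x86_64').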
105 | |
106 class LogDogNotBootstrapped(Exception): | |
107 pass | |
108 | |
109 | |
110 class LogDogBootstrapError(Exception): | |
111 pass | |
112 | |
113 | |
114 def is_executable(path): | |
115 return os.path.isfile(path) and os.access(path, os.X_OK) | |
116 | |
117 | |
118 def ensure_directory(*path): | |
119 path = os.path.join(*path) | |
120 if not os.path.isdir(path): | |
121 os.makedirs(path) | |
122 return path | |
123 | |
124 | |
125 def _run_command(cmd, **kwargs): | |
126 dry_run = kwargs.pop('dry_run', False) | |
127 | |
128 LOGGER.debug('Executing command: %s', cmd) | |
129 if dry_run: | |
130 LOGGER.info('(Dry Run) Not executing command.') | |
131 return 0, '' | |
132 proc = subprocess.Popen(cmd, stderr=subprocess.STDOUT) | |
133 stdout, _ = proc.communicate() | |
134 | |
135 LOGGER.debug('Process [%s] returned [%d] with output:\n%s', | |
136 cmd, proc.returncode, stdout) | |
137 return proc.returncode, stdout | |
138 | |
139 | |
140 def _check_command(*args, **kwargs): | |
141 rv, stdout = _run_command(args, **kwargs) | |
142 if rv != 0: | |
143 raise ValueError('Process exited with non-zero return code (%d)' % (rv,)) | |
pgervais
2015/12/01 18:58:28
Can we raise CalledProcessError here to be consistent?
dnj
2015/12/01 22:39:55
Done.
| |
144 return stdout | |
145 | |
146 | |
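Per the CalledProcessError thread above, a minimal sketch of what the rewritten `_check_command` might look like (Python 2.7's `subprocess.CalledProcessError(returncode, cmd, output=None)` carries the captured output on its `output` attribute):

    def _check_command(*args, **kwargs):
      rv, stdout = _run_command(args, **kwargs)
      if rv != 0:
        # Match subprocess.check_output failure semantics.
        raise subprocess.CalledProcessError(rv, args, output=stdout)
      return stdout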
147 class RecipeRuntime(object): | |
148 """RecipeRuntime is the platform-specific runtime enviornment. | |
pgervais
2015/12/01 18:58:28
Typo: enviornment
dnj
2015/12/01 22:39:55
Acknowledged.
| |
149 | |
150 The runtime is loaded with a set of read-only attributes that are a | |
151 combination of plaetform and runtime values used in the setup and execution of | |
pgervais
2015/12/01 18:58:28
Typo here as well.
dnj
2015/12/01 22:39:55
Acknowledged.
| |
152 the recipe engine. | |
153 """ | |
154 | |
155 _SENTINEL = object() | |
156 | |
157 def __init__(self, **kwargs): | |
158 self._attrs = kwargs | |
159 | |
160 @classmethod | |
161 @contextlib.contextmanager | |
162 def enter(cls, leak, **kw): | |
163 """Enters the annotated_run environment. | |
164 | |
165 This creates a temporary directory for this annotation run that is | |
166 automatically cleaned up. It returns a RecipeRuntime object containing a | |
pgervais
2015/12/01 18:58:28
It is cleaned up if this script does not crash beforehand.
dnj
2015/12/01 22:39:55
Perma-crash yes, but the layout also helps with that.
| |
167 combination of the supplied keyword arguments and the platform-specific | |
168 configuration. | |
169 | |
170 Args: | |
171 leak (bool): If true, don't clean up the temporary directory on exit. | |
172 kw (dict): Key/value pairs to add as attributes to the RecipeRuntime. | |
173 """ | |
174 # Build our platform attributes. | |
175 p = (platform.system(), platform.processor()) | |
176 attrs = {} | |
177 for i in xrange(len(p)+1): | |
178 attrs.update(PLATFORM_CONFIG.get(p[:i], {})) | |
179 attrs.update(kw) | |
180 | |
181 basedir = ensure_directory(os.getcwd(), '.recipe_runtime') | |
48 try: | 182 try: |
49 os.remove(name) | 183 tdir = tempfile.mkdtemp(dir=basedir) |
50 except OSError as e: | 184 LOGGER.debug('Using temporary directory [%s].', tdir) |
51 print >> sys.stderr, "LEAK: %s: %s" % (name, e) | 185 |
186 attrs['workdir'] = tdir | |
187 yield cls(**attrs) | |
188 finally: | |
189 if basedir and os.path.isdir(basedir): | |
190 if not leak: | |
191 LOGGER.debug('Cleaning up temporary directory [%s].', basedir) | |
192 try: | |
193 # TODO(pgervais): use infra_libs.rmtree instead. | |
194 shutil.rmtree(basedir) | |
195 except Exception: | |
196 LOGGER.exception('Failed to clean up temporary directory [%s].', | |
197 basedir) | |
198 else: | |
199 LOGGER.warning('(--leak) Leaking temporary directory [%s].', basedir) | |
200 | |
201 def __getattr__(self, key): | |
pgervais
2015/12/01 18:58:28
What is the purpose of returning the superclass attribute?
dnj
2015/12/01 22:39:55
Rewrote.
| |
202 # Class methods/variables. | |
203 value = getattr(super(RecipeRuntime, self), key, self._SENTINEL) | |
204 if value is not self._SENTINEL: | |
205 return value | |
206 | |
207 value = getattr(self, 'get')(key, self._SENTINEL) | |
208 if value is not self._SENTINEL: | |
209 return value | |
210 raise KeyError(key) | |
211 | |
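A sketch of the conventional form the "Rewrote" above presumably moved toward: `__getattr__` is only consulted after normal attribute lookup (instance, class, and superclasses) has already failed, so the explicit superclass probe is redundant, and raising `AttributeError` instead of `KeyError` keeps `getattr(rt, key, default)` and `hasattr` working:

    def __getattr__(self, key):
      try:
        return self._attrs[key]
      except KeyError:
        raise AttributeError(key)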
212 def get(self, key, default=None): | |
213 value = self._attrs.get(key, self._SENTINEL) | |
214 if value is not self._SENTINEL: | |
215 return value | |
216 return default | |
217 | |
218 def __str__(self): | |
219 return str(self._attrs) | |
220 | |
221 | |
222 class CIPD(object): | |
223 _CIPD_NAME = 'cipd' | |
224 | |
225 def __init__(self, path, root): | |
pgervais
2015/12/01 18:58:28
Nit: path -> cipd_path
dnj
2015/12/01 22:39:55
Acknowledged.
| |
226 self._cipd_path = path | |
227 self._root = root | |
228 | |
229 @classmethod | |
230 def find(cls, rt, rootdir): | |
231 for p in itertools.chain( | |
232 iter(os.environ.get('PATH').split(os.pathsep)), | |
233 rt.cipd_static_paths): | |
234 candidate = os.path.join(p, cls._CIPD_NAME) | |
235 if os.path.isfile(candidate) and os.access(candidate, os.X_OK): | |
pgervais
2015/12/01 18:58:28
is_executable() instead
dnj
2015/12/01 22:39:55
Deprecated.
| |
236 return cls(candidate, rootdir) | |
237 return None | |
238 | |
239 def __call__(self, *args): | |
240 cmd = [self._cipd_path] | |
241 cmd.extend(args) | |
242 _check_command(*cmd) | |
243 | |
244 def path(self, *components): | |
245 return os.path.join(self._root, *components) | |
246 | |
247 def ensure(self, **packages): | |
248 if len(packages) == 0: | |
249 return | |
250 | |
251 # Emit package list. | |
252 package_list = self.path('package_list.txt') | |
253 lines = [ | |
254 '# Automatically generated CIPD package list (launcher.py)', | |
255 '# Generated at: %s' % (datetime.datetime.now().isoformat(),), | |
pgervais
2015/12/01 18:58:28
This does not show the timezone. You can use this
dnj
2015/12/01 22:39:55
Deprecated.
| |
256 '', | |
257 ] | |
258 for pkg, version in sorted(packages.iteritems()): | |
259 lines.append('%s %s' % (pkg, version)) | |
260 | |
261 ensure_directory(self._root) | |
262 with open(package_list, 'w+') as fd: | |
263 fd.write('\n'.join(lines)) | |
264 | |
265 # Ensure against the package list. | |
266 args = [ | |
267 'ensure', | |
268 '-root', self._root, | |
269 '-list', package_list, | |
270 ] | |
271 self(*args) | |
272 | |
273 | |
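For reference, the package list that `ensure` writes above is a plain "package version" manifest fed to `cipd ensure -list`; with the Linux packages from PLATFORM_CONFIG (and, per the timezone thread, an unambiguous UTC timestamp such as `datetime.datetime.utcnow().isoformat() + 'Z'`; timestamp below illustrative) it would look roughly like:

    # Automatically generated CIPD package list (launcher.py)
    # Generated at: 2015-12-01T18:58:28Z

    infra/tools/luci/logdog/annotee/linux-amd64 testing
    infra/tools/luci/logdog/butler/linux-amd64 testing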
274 def _get_service_account_json(rt, opts): | |
275 """Returns (str/None): If specified, the path to the service account JSON. | |
276 | |
277 This method probes the local environment and returns the path to the service | |
278 account credentials to pass to the Butler for authentication, if any. | |
279 | |
280 If we're running on a GCE instance, None will be returned, as the GCE | |
281 service account is implicitly authenticated. If we're running on bare metal, | |
282 a path to those credentials will be returned. | |
283 | |
284 Args: | |
285 rt (RecipeRuntime): The runtime environment. | |
286 Raises: | |
287 |LogDogBootstrapError| if no credentials could be found. | |
288 """ | |
289 path = opts.get('service_account_json') | |
290 if path: | |
291 return path | |
292 | |
293 if gce.Authenticator.is_gce(): | |
294 LOGGER.info('Running on GCE. No credentials necessary.') | |
295 return None | |
296 | |
297 for credential_path in rt.get('credential_paths', ()): | |
298 candidate = os.path.join(credential_path, 'logdog_service_account.json') | |
299 if os.path.isfile(candidate): | |
300 return candidate | |
301 | |
302 raise LogDogBootstrapError('Could not find service account credentials.') | |
pgervais
2015/12/01 18:58:28
Listing the paths that have been tried will help with debugging.
dnj
2015/12/01 22:39:55
Done.
| |
303 | |
304 | |
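A sketch of the "Done" from the thread above, accumulating each candidate path so the eventual error names everything that was tried:

    tried = []
    for credential_path in rt.get('credential_paths', ()):
      candidate = os.path.join(credential_path, 'logdog_service_account.json')
      if os.path.isfile(candidate):
        return candidate
      tried.append(candidate)
    raise LogDogBootstrapError(
        'Could not find service account credentials. Tried: %s' % (tried,))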
305 def _logdog_bootstrap(rt, opts, cmd): | |
306 """Executes the recipe engine, bootstrapping it through LogDog/Annotee. | |
307 | |
308 This method executes the recipe engine, bootstrapping it through | |
309 LogDog/Annotee so its output and annotations are streamed to LogDog. The | |
310 bootstrap is configured to tee the annotations through STDOUT/STDERR so they | |
311 will still be sent to BuildBot. | |
312 | |
313 The overall setup here is: | |
314 [annotated_run.py] => [logdog_butler] => [logdog_annotee] => [recipes.py] | |
315 | |
316 Args: | |
317 rt (RecipeRuntime): Recipe runtime enviornment. | |
pgervais
2015/12/01 18:58:28
typo: enviornment
dnj
2015/12/01 22:39:55
Done.
| |
318 opts (argparse.Namespace): Command-line options. | |
319 cmd (list): The recipe runner command list to bootstrap. | |
320 | |
321 Returns (int): The return code of the recipe runner process. | |
322 | |
323 Raises: | |
324 LogDogNotBootstrapped: if the recipe engine was not executed because the | |
325 LogDog bootstrap requirements are not available. | |
326 LogDogBootstrapError: if there was an error bootstrapping the recipe runner | |
327 through LogDog. | |
328 """ | |
329 bootstrap_dir = ensure_directory(rt.workdir, 'logdog_bootstrap') | |
330 butler, annotee = opts.logdog_butler_path, opts.logdog_annotee_path | |
331 if not (butler and annotee): | |
332 # Load packages via CIPD. | |
333 cipd = CIPD.find(rt, os.path.join(bootstrap_dir, 'cipd_root')) | |
334 if rt.logdog_cipd_packages: | |
335 if not cipd: | |
336 raise LogDogBootstrapError('Could not find CIPD binary.') | |
337 cipd.ensure(**rt.logdog_cipd_packages) | |
338 if not butler: | |
339 butler = cipd.path('logdog_butler') | |
340 if not annotee: | |
341 annotee = cipd.path('logdog_annotee') | |
342 | |
343 if not is_executable(annotee): | |
344 raise LogDogNotBootstrapped('Annotee is not executable: %s' % (annotee,)) | |
345 if not is_executable(butler): | |
346 raise LogDogNotBootstrapped('Butler is not executable: %s' % (butler,)) | |
347 | |
348 # Determine LogDog verbosity. | |
349 logdog_verbose = [] | |
350 if opts.logdog_verbose == 0: | |
351 pass | |
352 elif opts.logdog_verbose == 1: | |
353 logdog_verbose.append('-log_level=info') | |
354 else: | |
355 logdog_verbose.append('-log_level=debug') | |
356 | |
357 service_account_args = [] | |
358 service_account_json = _get_service_account_json(rt, opts) | |
359 if service_account_json: | |
360 service_account_args += ['-service-account-json', service_account_json] | |
361 | |
362 streamserver_uri_gen = rt.logdog_butler_streamserver_gen | |
363 if not streamserver_uri_gen: | |
364 raise LogDogBootstrapError('No streamserver URI generator.') | |
365 streamserver_uri = streamserver_uri_gen(rt) | |
366 | |
367 # Dump Annotee command to JSON. | |
368 cmd_json = os.path.join(bootstrap_dir, 'annotee_cmd.json') | |
369 with open(cmd_json, 'w') as fd: | |
370 json.dump(cmd, fd) | |
371 | |
372 cmd = [ | |
373 # Butler Command. | |
374 butler, | |
375 ] + logdog_verbose + service_account_args + [ | |
376 '-output', 'gcps,project="%s",topic="%s"' % (rt.logdog_pubsub.project, | |
377 rt.logdog_pubsub.topic), | |
378 'run', | |
379 '-streamserver-uri', streamserver_uri, | |
380 '--', | |
381 | |
382 # Annotee Command. | |
383 annotee, | |
384 ] + logdog_verbose + [ | |
385 '-json-args-path', cmd_json, | |
386 ] | |
387 rv, _ = _run_command(cmd, dry_run=opts.dry_run) | |
388 if rv in LOGDOG_ERROR_RETURNCODES: | |
389 raise LogDogBootstrapError('LogDog Error (%d)' % (rv,)) | |
390 return rv | |
391 | |
392 | |
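Concretely, at default verbosity and with GCE-implicit credentials (so the `-log_level` and `-service-account-json` flags drop out), the `cmd` list assembled above has this shape, with all paths illustrative:

    ['/b/work/logdog_bootstrap/cipd_root/logdog_butler',
     '-output', 'gcps,project="luci-logdog",topic="chrome-infra-beta"',
     'run',
     '-streamserver-uri', '/b/work/butler.sock',
     '--',
     '/b/work/logdog_bootstrap/cipd_root/logdog_annotee',
     '-json-args-path', '/b/work/logdog_bootstrap/annotee_cmd.json']

`annotee_cmd.json` holds the JSON-serialized `recipes.py run ...` invocation, so Annotee ends up the direct parent of the recipe engine and tees its annotations back through STDOUT/STDERR for BuildBot.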
393 def _assert_logdog_whitelisted(rt): | |
394 """Asserts that the runtime environment is whitelisted for LogDog bootstrap. | |
395 | |
396 Args: | |
397 rt (RecipeRuntime): The runtime to test. | |
398 Raises: | |
399 LogDogNotBootstrapped: if the runtime is not whitelisted. | |
400 """ | |
401 mastername, buildername = rt.get('mastername'), rt.get('buildername') | |
402 if not all((mastername, buildername)): | |
403 raise LogDogNotBootstrapped('Required mastername/buildername is not set.') | |
404 | |
405 # Key on mastername. | |
406 bdict = LOGDOG_WHITELIST_MASTER_BUILDERS.get(mastername) | |
407 if bdict is not None: | |
408 # Key on buildername. | |
409 if WHITELIST_ALL in bdict or buildername in bdict: | |
410 LOGGER.info('Whitelisted master %s, builder %s.', | |
411 mastername, buildername) | |
412 return | |
413 raise LogDogNotBootstrapped('Master %s, builder %s is not whitelisted.' % ( | |
414 mastername, buildername)) | |
52 | 415 |
53 | 416 |
54 def get_recipe_properties(build_properties, use_factory_properties_from_disk): | 417 def get_recipe_properties(build_properties, use_factory_properties_from_disk): |
55 """Constructs the recipe's properties from buildbot's properties. | 418 """Constructs the recipe's properties from buildbot's properties. |
56 | 419 |
57 This retrieves the current factory properties from the master_config | 420 This retrieves the current factory properties from the master_config |
58 in the slave's checkout (no factory properties are handed to us from the | 421 in the slave's checkout (no factory properties are handed to us from the |
59 master), and merges in the build properties. | 422 master), and merges in the build properties. |
60 | 423 |
61 Using the values from the checkout allows us to do things like change | 424 Using the values from the checkout allows us to do things like change |
(...skipping 44 matching lines...)
106 for name, value in factory_properties.items(): | 469 for name, value in factory_properties.items(): |
107 if not build_properties.has_key(name): | 470 if not build_properties.has_key(name): |
108 s.set_build_property(name, json.dumps(value)) | 471 s.set_build_property(name, json.dumps(value)) |
109 | 472 |
110 # Build properties override factory properties. | 473 # Build properties override factory properties. |
111 properties = factory_properties.copy() | 474 properties = factory_properties.copy() |
112 properties.update(build_properties) | 475 properties.update(build_properties) |
113 return properties | 476 return properties |
114 | 477 |
115 | 478 |
116 def get_factory_properties_from_disk(mastername, buildername): | 479 def get_factory_properties_from_disk(rt): |
117 master_list = master_cfg_utils.GetMasters() | 480 master_list = master_cfg_utils.GetMasters() |
118 master_path = None | 481 master_path = None |
119 for name, path in master_list: | 482 for name, path in master_list: |
120 if name == mastername: | 483 if name == rt.mastername: |
121 master_path = path | 484 master_path = path |
122 | 485 |
123 if not master_path: | 486 if not master_path: |
124 raise LookupError('master "%s" not found.' % mastername) | 487 raise LookupError('master "%s" not found.' % rt.mastername) |
125 | 488 |
126 script_path = os.path.join(BUILD_ROOT, 'scripts', 'tools', | 489 script_path = os.path.join(BUILD_ROOT, 'scripts', 'tools', |
127 'dump_master_cfg.py') | 490 'dump_master_cfg.py') |
128 | 491 |
129 with namedTempFile() as fname: | 492 master_json = os.path.join(rt.workdir, 'dump_master_cfg.json') |
130 dump_cmd = [sys.executable, | 493 dump_cmd = [sys.executable, |
131 script_path, | 494 script_path, |
132 master_path, fname] | 495 master_path, master_json] |
133 proc = subprocess.Popen(dump_cmd, cwd=BUILD_ROOT, stdout=subprocess.PIPE, | 496 proc = subprocess.Popen(dump_cmd, cwd=BUILD_ROOT, stdout=subprocess.PIPE, |
134 stderr=subprocess.PIPE) | 497 stderr=subprocess.PIPE) |
135 out, err = proc.communicate() | 498 out, err = proc.communicate() |
136 exit_code = proc.returncode | 499 if proc.returncode: |
500 raise LookupError('Failed to get the master config; dump_master_cfg %s ' | |
501 'returned %d:\n%s\n%s\n' % ( | |
502 rt.mastername, proc.returncode, out, err)) | |
137 | 503 |
138 if exit_code: | 504 with open(master_json, 'rU') as f: |
139 raise LookupError('Failed to get the master config; dump_master_cfg %s' | 505 config = json.load(f) |
140 'returned %d):\n%s\n%s\n'% ( | |
141 mastername, exit_code, out, err)) | |
142 | |
143 with open(fname, 'rU') as f: | |
144 config = json.load(f) | |
145 | 506 |
146 # Now extract just the factory properties for the requested builder | 507 # Now extract just the factory properties for the requested builder |
147 # from the master config. | 508 # from the master config. |
148 props = {} | 509 props = {} |
149 found = False | 510 found = False |
150 for builder_dict in config['builders']: | 511 for builder_dict in config['builders']: |
151 if builder_dict['name'] == buildername: | 512 if builder_dict['name'] == rt.buildername: |
152 found = True | 513 found = True |
153 factory_properties = builder_dict['factory']['properties'] | 514 factory_properties = builder_dict['factory']['properties'] |
154 for name, (value, _) in factory_properties.items(): | 515 for name, (value, _) in factory_properties.items(): |
155 props[name] = value | 516 props[name] = value |
156 | 517 |
157 if not found: | 518 if not found: |
158 raise LookupError('builder "%s" not found in master "%s"' % | 519 raise LookupError('builder "%s" not found in master "%s"' % |
159 (buildername, mastername)) | 520 (rt.buildername, rt.mastername)) |
160 | 521 |
161 if 'recipe' not in props: | 522 if 'recipe' not in props: |
162 raise LookupError('Cannot find recipe for %s on %s' % | 523 raise LookupError('Cannot find recipe for %s on %s' % |
163 (buildername, mastername)) | 524 (rt.buildername, rt.mastername)) |
164 | 525 |
165 return props | 526 return props |
166 | 527 |
167 | 528 |
168 def get_args(argv): | 529 def get_args(argv): |
169 """Process command-line arguments.""" | 530 """Process command-line arguments.""" |
170 parser = argparse.ArgumentParser( | 531 parser = argparse.ArgumentParser( |
171 description='Entry point for annotated builds.') | 532 description='Entry point for annotated builds.') |
533 parser.add_argument('-v', '--verbose', | |
534 action='count', default=0, | |
535 help='Increase verbosity. This can be specified multiple times.') | |
536 parser.add_argument('-d', '--dry-run', action='store_true', | |
537 help='Perform the setup, but refrain from executing the recipe.') | |
538 parser.add_argument('-l', '--leak', action='store_true', | |
539 help="Refrain from cleaning up generated artifacts.") | |
172 parser.add_argument('--build-properties', | 540 parser.add_argument('--build-properties', |
173 type=json.loads, default={}, | 541 type=json.loads, default={}, |
174 help='build properties in JSON format') | 542 help='build properties in JSON format') |
175 parser.add_argument('--factory-properties', | 543 parser.add_argument('--factory-properties', |
176 type=json.loads, default={}, | 544 type=json.loads, default={}, |
177 help='factory properties in JSON format') | 545 help='factory properties in JSON format') |
178 parser.add_argument('--build-properties-gz', dest='build_properties', | 546 parser.add_argument('--build-properties-gz', dest='build_properties', |
179 type=chromium_utils.convert_gz_json_type, default={}, | 547 type=chromium_utils.convert_gz_json_type, default={}, |
180 help='build properties in b64 gz JSON format') | 548 help='build properties in b64 gz JSON format') |
181 parser.add_argument('--factory-properties-gz', dest='factory_properties', | 549 parser.add_argument('--factory-properties-gz', dest='factory_properties', |
182 type=chromium_utils.convert_gz_json_type, default={}, | 550 type=chromium_utils.convert_gz_json_type, default={}, |
183 help='factory properties in b64 gz JSON format') | 551 help='factory properties in b64 gz JSON format') |
184 parser.add_argument('--keep-stdin', action='store_true', default=False, | 552 parser.add_argument('--keep-stdin', action='store_true', default=False, |
185 help='don\'t close stdin when running recipe steps') | 553 help='don\'t close stdin when running recipe steps') |
186 parser.add_argument('--master-overrides-slave', action='store_true', | 554 parser.add_argument('--master-overrides-slave', action='store_true', |
187 help='use the property values given on the command line from the master, ' | 555 help='use the property values given on the command line from the master, ' |
188 'not the ones looked up on the slave') | 556 'not the ones looked up on the slave') |
189 parser.add_argument('--use-factory-properties-from-disk', | 557 parser.add_argument('--use-factory-properties-from-disk', |
190 action='store_true', default=False, | 558 action='store_true', default=False, |
191 help='use factory properties loaded from disk on the slave') | 559 help='use factory properties loaded from disk on the slave') |
560 | |
561 group = parser.add_argument_group('LogDog Bootstrap') | |
562 group.add_argument('-V', '--logdog-verbose', | |
563 action='count', default=0, | |
564 help='Increase LogDog verbosity. This can be specified multiple times.') | |
565 group.add_argument('-f', '--logdog-force', action='store_true', | |
566 help='Force LogDog bootstrapping, even if the system is not configured.') | |
567 group.add_argument('--logdog-butler-path', | |
568 help='Path to the LogDog Butler. If empty, one will be probed/downloaded ' | |
569 'from CIPD.') | |
570 group.add_argument('--logdog-annotee-path', | |
571 help='Path to the LogDog Annotee. If empty, one will be ' | |
572 'probed/downloaded from CIPD.') | |
573 group.add_argument('--logdog-service-account-json', | |
574 help='Path to the service account JSON. If one is not provided, the ' | |
575 'local system credentials will be used.') | |
576 | |
192 return parser.parse_args(argv) | 577 return parser.parse_args(argv) |
193 | 578 |
194 | 579 |
195 def update_scripts(): | 580 def update_scripts(): |
196 if os.environ.get('RUN_SLAVE_UPDATED_SCRIPTS'): | 581 if os.environ.get('RUN_SLAVE_UPDATED_SCRIPTS'): |
197 os.environ.pop('RUN_SLAVE_UPDATED_SCRIPTS') | 582 os.environ.pop('RUN_SLAVE_UPDATED_SCRIPTS') |
198 return False | 583 return False |
199 | 584 |
200 stream = annotator.StructuredAnnotationStream() | 585 stream = annotator.StructuredAnnotationStream() |
201 | 586 |
202 with stream.step('update_scripts') as s: | 587 with stream.step('update_scripts') as s: |
203 gclient_name = 'gclient' | 588 gclient_name = 'gclient' |
204 if sys.platform.startswith('win'): | 589 if sys.platform.startswith('win'): |
205 gclient_name += '.bat' | 590 gclient_name += '.bat' |
206 gclient_path = os.path.join(BUILD_ROOT, '..', 'depot_tools', gclient_name) | 591 gclient_path = os.path.join(BUILD_ROOT, '..', 'depot_tools', gclient_name) |
207 gclient_cmd = [gclient_path, 'sync', '--force', '--verbose'] | 592 gclient_cmd = [gclient_path, 'sync', '--force', '--verbose'] |
208 try: | 593 try: |
209 fd, output_json = tempfile.mkstemp() | 594 fd, output_json = tempfile.mkstemp() |
210 os.close(fd) | 595 os.close(fd) |
211 gclient_cmd += ['--output-json', output_json] | 596 gclient_cmd += ['--output-json', output_json] |
212 except Exception: | 597 except Exception: |
213 # Super paranoia try block. | 598 # Super paranoia try block. |
214 output_json = None | 599 output_json = None |
215 cmd_dict = { | 600 cmd_dict = { |
216 'name': 'update_scripts', | 601 'name': 'update_scripts', |
217 'cmd': gclient_cmd, | 602 'cmd': gclient_cmd, |
218 'cwd': BUILD_ROOT, | 603 'cwd': BUILD_ROOT, |
219 } | 604 } |
220 annotator.print_step(cmd_dict, os.environ, stream) | 605 annotator.print_step(cmd_dict, os.environ, stream) |
221 if subprocess.call(gclient_cmd, cwd=BUILD_ROOT) != 0: | 606 rv, _ = _run_command(gclient_cmd, cwd=BUILD_ROOT) |
pgervais
2015/12/01 18:58:28
Forgotten dry_run kwarg?
dnj
2015/12/01 22:39:55
This happens before arguments get parsed. The dry-run option isn't available yet.
| |
607 if rv != 0: | |
222 s.step_text('gclient sync failed!') | 608 s.step_text('gclient sync failed!') |
223 s.step_warnings() | 609 s.step_warnings() |
224 elif output_json: | 610 elif output_json: |
225 try: | 611 try: |
226 with open(output_json, 'r') as f: | 612 with open(output_json, 'r') as f: |
227 gclient_json = json.load(f) | 613 gclient_json = json.load(f) |
228 for line in json.dumps( | 614 for line in json.dumps( |
229 gclient_json, sort_keys=True, | 615 gclient_json, sort_keys=True, |
230 indent=4, separators=(',', ': ')).splitlines(): | 616 indent=4, separators=(',', ': ')).splitlines(): |
231 s.step_log_line('gclient_json', line) | 617 s.step_log_line('gclient_json', line) |
232 s.step_log_end('gclient_json') | 618 s.step_log_end('gclient_json') |
233 revision = gclient_json['solutions']['build/']['revision'] | 619 revision = gclient_json['solutions']['build/']['revision'] |
234 scm = gclient_json['solutions']['build/']['scm'] | 620 scm = gclient_json['solutions']['build/']['scm'] |
235 s.step_text('%s - %s' % (scm, revision)) | 621 s.step_text('%s - %s' % (scm, revision)) |
236 s.set_build_property('build_scm', json.dumps(scm)) | 622 s.set_build_property('build_scm', json.dumps(scm)) |
237 s.set_build_property('build_revision', json.dumps(revision)) | 623 s.set_build_property('build_revision', json.dumps(revision)) |
238 except Exception as e: | 624 except Exception as e: |
239 s.step_text('Unable to process gclient JSON %s' % repr(e)) | 625 s.step_text('Unable to process gclient JSON %s' % repr(e)) |
240 s.step_warnings() | 626 s.step_warnings() |
241 finally: | 627 finally: |
242 try: | 628 try: |
243 os.remove(output_json) | 629 os.remove(output_json) |
244 except Exception as e: | 630 except Exception as e: |
245 print >> sys.stderr, "LEAKED:", output_json, e | 631 LOGGER.warning("LEAKED: %s", output_json, exc_info=True) |
246 else: | 632 else: |
247 s.step_text('Unable to get SCM data') | 633 s.step_text('Unable to get SCM data') |
248 s.step_warnings() | 634 s.step_warnings() |
249 | 635 |
250 os.environ['RUN_SLAVE_UPDATED_SCRIPTS'] = '1' | 636 os.environ['RUN_SLAVE_UPDATED_SCRIPTS'] = '1' |
251 | 637 |
252 # After running update scripts, set PYTHONIOENCODING=UTF-8 for the real | 638 # After running update scripts, set PYTHONIOENCODING=UTF-8 for the real |
253 # annotated_run. | 639 # annotated_run. |
254 os.environ['PYTHONIOENCODING'] = 'UTF-8' | 640 os.environ['PYTHONIOENCODING'] = 'UTF-8' |
255 | 641 |
256 return True | 642 return True |
257 | 643 |
258 | 644 |
259 def clean_old_recipe_engine(): | 645 def clean_old_recipe_engine(): |
260 """Clean stale pycs from the old location of recipe_engine. | 646 """Clean stale pycs from the old location of recipe_engine. |
261 | 647 |
262 This function should only be needed for a little while after the recipe | 648 This function should only be needed for a little while after the recipe |
263 packages rollout (2015-09-16). | 649 packages rollout (2015-09-16). |
264 """ | 650 """ |
265 for (dirpath, _, filenames) in os.walk( | 651 for (dirpath, _, filenames) in os.walk( |
266 os.path.join(BUILD_ROOT, 'third_party', 'recipe_engine')): | 652 os.path.join(BUILD_ROOT, 'third_party', 'recipe_engine')): |
267 for filename in filenames: | 653 for filename in filenames: |
268 if filename.endswith('.pyc'): | 654 if filename.endswith('.pyc'): |
269 path = os.path.join(dirpath, filename) | 655 os.remove(os.path.join(dirpath, filename)) |
270 os.remove(path) | |
271 | 656 |
272 | 657 |
273 @contextlib.contextmanager | 658 def write_monitoring_event(rt, outdir): |
274 def build_data_directory(): | 659 if not (rt.run_cmd and os.path.exists(rt.run_cmd)): |
275 """Context manager that creates a build-specific directory. | 660 LOGGER.warning('Unable to find run.py at %s, no events will be sent.', |
661 rt.run_cmd) | |
662 return | |
276 | 663 |
277 The directory is wiped when exiting. | 664 hostname = socket.getfqdn() |
665 if hostname: # just in case getfqdn() returns None. | |
666 hostname = hostname.split('.')[0] | |
667 else: | |
668 hostname = None | |
278 | 669 |
279 Yields: | 670 try: |
280 build_data (str or None): full path to a writeable directory. Return None if | 671 cmd = [rt.run_cmd, 'infra.tools.send_monitoring_event', |
281 no directory can be found or if it's not writeable. | 672 '--event-mon-output-file', |
282 """ | 673 os.path.join(outdir, 'log_request_proto'), |
283 prefix = 'build_data' | 674 '--event-mon-run-type', 'file', |
284 | 675 '--event-mon-service-name', |
285 # TODO(pgervais): import that from infra_libs.logs instead | 676 'buildbot/master/master.%s' |
286 if sys.platform.startswith('win'): # pragma: no cover | 677 % rt.get('mastername', 'UNKNOWN'), |
287 DEFAULT_LOG_DIRECTORIES = [ | 678 '--build-event-build-name', |
288 'E:\\chrome-infra-logs', | 679 rt.get('buildername', 'UNKNOWN'), |
289 'C:\\chrome-infra-logs', | 680 '--build-event-build-number', |
290 ] | 681 str(rt.get('buildnumber', 0)), |
291 else: | 682 '--build-event-build-scheduling-time', |
292 DEFAULT_LOG_DIRECTORIES = ['/var/log/chrome-infra'] | 683 str(1000*int(rt.get('requestedAt', 0))), |
293 | 684 '--build-event-type', 'BUILD', |
294 build_data_dir = None | 685 '--event-mon-timestamp-kind', 'POINT', |
295 for candidate in DEFAULT_LOG_DIRECTORIES: | 686 # And use only defaults for credentials. |
296 if os.path.isdir(candidate): | 687 ] |
297 build_data_dir = os.path.join(candidate, prefix) | 688 # Add this conditionally so that we get an error in |
298 break | 689 # send_monitoring_event log files in case it isn't present. |
299 | 690 if hostname: |
300 # Remove any leftovers and recreate the dir. | 691 cmd += ['--build-event-hostname', hostname] |
301 if build_data_dir: | 692 _check_command(cmd) |
302 print >> sys.stderr, "Creating directory" | 693 except Exception: |
303 # TODO(pgervais): use infra_libs.rmtree instead. | 694 LOGGER.warning("Failed to send monitoring event.", exc_info=True) |
304 if os.path.exists(build_data_dir): | |
305 try: | |
306 shutil.rmtree(build_data_dir) | |
307 except Exception as exc: | |
308 # Catching everything: we don't want to break any builds for that reason | |
309 print >> sys.stderr, ( | |
310 "FAILURE: path can't be deleted: %s.\n%s" % (build_data_dir, str(exc)) | |
311 ) | |
312 print >> sys.stderr, "Creating directory" | |
313 | |
314 if not os.path.exists(build_data_dir): | |
315 try: | |
316 os.mkdir(build_data_dir) | |
317 except Exception as exc: | |
318 print >> sys.stderr, ( | |
319 "FAILURE: directory can't be created: %s.\n%s" % | |
320 (build_data_dir, str(exc)) | |
321 ) | |
322 build_data_dir = None | |
323 | |
324 # Under this line build_data_dir should point to an existing empty dir | |
325 # or be None. | |
326 yield build_data_dir | |
327 | |
328 # Clean up after ourselves | |
329 if build_data_dir: | |
330 # TODO(pgervais): use infra_libs.rmtree instead. | |
331 try: | |
332 shutil.rmtree(build_data_dir) | |
333 except Exception as exc: | |
334 # Catching everything: we don't want to break any builds for that reason. | |
335 print >> sys.stderr, ( | |
336 "FAILURE: path can't be deleted: %s.\n%s" % (build_data_dir, str(exc)) | |
337 ) | |
338 | 695 |
339 | 696 |
340 def main(argv): | 697 def main(argv): |
341 opts = get_args(argv) | 698 opts = get_args(argv) |
699 | |
700 if opts.verbose == 0: | |
701 level = logging.INFO | |
702 else: | |
703 level = logging.DEBUG | |
pgervais
2015/12/01 18:58:28
Can we have intermediate log levels as well?
dnj
2015/12/01 22:39:55
Such as? I don't think there's anything between INFO and DEBUG.
| |
704 logging.getLogger().setLevel(level) | |
705 | |
342 # TODO(crbug.com/551165): remove flag "factory_properties". | 706 # TODO(crbug.com/551165): remove flag "factory_properties". |
343 use_factory_properties_from_disk = (opts.use_factory_properties_from_disk or | 707 use_factory_properties_from_disk = (opts.use_factory_properties_from_disk or |
344 bool(opts.factory_properties)) | 708 bool(opts.factory_properties)) |
345 properties = get_recipe_properties( | 709 properties = get_recipe_properties( |
346 opts.build_properties, use_factory_properties_from_disk) | 710 opts.build_properties, use_factory_properties_from_disk) |
347 | 711 |
348 clean_old_recipe_engine() | 712 clean_old_recipe_engine() |
349 | 713 |
350 # Find out if the recipe we intend to run is in build_internal's recipes. If | 714 # Enter our runtime environment. |
351 # so, use recipes.py from there, otherwise use the one from build. | 715 with RecipeRuntime.enter(opts.leak, **properties) as rt: |
352 recipe_file = properties['recipe'].replace('/', os.path.sep) + '.py' | 716 LOGGER.debug('Loaded runtime: %s', rt) |
353 if os.path.exists(os.path.join(BUILD_LIMITED_ROOT, 'recipes', recipe_file)): | |
354 recipe_runner = os.path.join(BUILD_LIMITED_ROOT, 'recipes.py') | |
355 else: | |
356 recipe_runner = os.path.join(SCRIPT_PATH, 'recipes.py') | |
357 | 717 |
358 with build_data_directory() as build_data_dir: | 718 # Find out if the recipe we intend to run is in build_internal's recipes. If |
359 # Create a LogRequestLite proto containing this build's information. | 719 # so, use recipes.py from there, otherwise use the one from build. |
360 if build_data_dir: | 720 recipe_file = properties['recipe'].replace('/', os.path.sep) + '.py' |
361 properties['build_data_dir'] = build_data_dir | 721 if os.path.exists(os.path.join(BUILD_LIMITED_ROOT, 'recipes', recipe_file)): |
722 recipe_runner = os.path.join(BUILD_LIMITED_ROOT, 'recipes.py') | |
723 else: | |
724 recipe_runner = os.path.join(SCRIPT_PATH, 'recipes.py') | |
362 | 725 |
363 hostname = socket.getfqdn() | 726 # Setup monitoring directory and send a monitoring event. |
364 if hostname: # just in case getfqdn() returns None. | 727 build_data_dir = ensure_directory(rt.workdir, 'build_data') |
365 hostname = hostname.split('.')[0] | 728 properties['build_data_dir'] = build_data_dir |
366 else: | |
367 hostname = None | |
368 | 729 |
369 if RUN_CMD and os.path.exists(RUN_CMD): | 730 # Write our annotated_run.py monitoring event. |
370 try: | 731 write_monitoring_event(rt, build_data_dir) |
371 cmd = [RUN_CMD, 'infra.tools.send_monitoring_event', | |
372 '--event-mon-output-file', | |
373 os.path.join(build_data_dir, 'log_request_proto'), | |
374 '--event-mon-run-type', 'file', | |
375 '--event-mon-service-name', | |
376 'buildbot/master/master.%s' | |
377 % properties.get('mastername', 'UNKNOWN'), | |
378 '--build-event-build-name', | |
379 properties.get('buildername', 'UNKNOWN'), | |
380 '--build-event-build-number', | |
381 str(properties.get('buildnumber', 0)), | |
382 '--build-event-build-scheduling-time', | |
383 str(1000*int(properties.get('requestedAt', 0))), | |
384 '--build-event-type', 'BUILD', | |
385 '--event-mon-timestamp-kind', 'POINT', | |
386 # And use only defaults for credentials. | |
387 ] | |
388 # Add this conditionally so that we get an error in | |
389 # send_monitoring_event log files in case it isn't present. | |
390 if hostname: | |
391 cmd += ['--build-event-hostname', hostname] | |
392 subprocess.call(cmd) | |
393 except Exception: | |
394 print >> sys.stderr, traceback.format_exc() | |
395 | 732 |
396 else: | 733 # Dump properties to JSON and build recipe command. |
397 print >> sys.stderr, ( | 734 props_file = os.path.join(rt.workdir, 'recipe_properties.json') |
398 'WARNING: Unable to find run.py at %r, no events will be sent.' | 735 with open(props_file, 'w') as fh: |
pgervais
2015/12/01 18:58:28
'wb' ?
dnj
2015/12/01 22:39:55
It's JSON, which is text, not binary.
| |
399 % str(RUN_CMD) | 736 json.dump(properties, fh) |
400 ) | 737 cmd = [ |
738 sys.executable, '-u', recipe_runner, | |
739 'run', | |
740 '--workdir=%s' % os.getcwd(), | |
741 '--properties-file=%s' % props_file, | |
742 rt.recipe, | |
743 ] | |
401 | 744 |
402 with namedTempFile() as props_file: | 745 status = None |
403 with open(props_file, 'w') as fh: | 746 try: |
404 fh.write(json.dumps(properties)) | 747 if not opts.logdog_force: |
405 cmd = [ | 748 _assert_logdog_whitelisted(rt) |
406 sys.executable, '-u', recipe_runner, | |
407 'run', | |
408 '--workdir=%s' % os.getcwd(), | |
409 '--properties-file=%s' % props_file, | |
410 properties['recipe'] ] | |
411 status = subprocess.call(cmd) | |
412 | 749 |
413 # TODO(pgervais): Send events from build_data_dir to the endpoint. | 750 status = _logdog_bootstrap(rt, opts, cmd) |
751 except LogDogNotBootstrapped as e: | |
752 LOGGER.info('Not bootstrapped: %s', e.message) | |
753 except LogDogBootstrapError as e: | |
754 LOGGER.warning('Could not bootstrap LogDog: %s', e.message) | |
755 except Exception: | |
756 LOGGER.exception('Exception while bootstrapping LogDog.') | |
757 finally: | |
758 if status is None: | |
759 LOGGER.info('Not using LogDog. Invoking `annotated_run.py` directly.') | |
760 status, _ = _run_command(cmd, dry_run=opts.dry_run) | |
761 | |
762 # TODO(pgervais): Send events from build_data_dir to the endpoint. | |
pgervais
2015/12/01 18:58:28
Please remove this comment, it's obsolete.
dnj
2015/12/01 22:39:55
Done.
| |
414 return status | 763 return status |
415 | 764 |
765 | |
416 def shell_main(argv): | 766 def shell_main(argv): |
417 if update_scripts(): | 767 if update_scripts(): |
418 return subprocess.call([sys.executable] + argv) | 768 # Re-execute with the updated annotated_run.py. |
769 rv, _ = _run_command([sys.executable] + argv) | |
770 return rv | |
419 else: | 771 else: |
420 return main(argv[1:]) | 772 return main(argv[1:]) |
421 | 773 |
422 | 774 |
423 if __name__ == '__main__': | 775 if __name__ == '__main__': |
776 logging.basicConfig(level=logging.INFO) | |
424 sys.exit(shell_main(sys.argv)) | 777 sys.exit(shell_main(sys.argv)) |