Index: scripts/slave/annotated_run.py |
diff --git a/scripts/slave/annotated_run.py b/scripts/slave/annotated_run.py |
index ab4bc1f9e95625bc0f227932ad1b1d865b8a52a5..070d0f1fa704a6f06744bbad3f041d763b42df77 100755 |
--- a/scripts/slave/annotated_run.py |
+++ b/scripts/slave/annotated_run.py |
@@ -3,52 +3,376 @@ |
# Use of this source code is governed by a BSD-style license that can be |
# found in the LICENSE file. |
+import argparse |
+import collections |
import contextlib |
+import datetime |
+import hashlib |
+import itertools |
import json |
-import optparse |
+import logging |
import os |
+import platform |
import shutil |
import socket |
import subprocess |
import sys |
import tempfile |
-import traceback |
+ |
+# Install Infra build environment. |
BUILD_ROOT = os.path.dirname(os.path.dirname(os.path.dirname( |
- os.path.abspath(__file__)))) |
-sys.path.append(os.path.join(BUILD_ROOT, 'scripts')) |
-sys.path.append(os.path.join(BUILD_ROOT, 'third_party')) |
+ os.path.abspath(__file__)))) |
+sys.path.insert(0, os.path.join(BUILD_ROOT, 'scripts')) |
+import common.env |
+common.env.Install() |
from common import annotator |
from common import chromium_utils |
from common import master_cfg_utils |
- |
-SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__)) |
-BUILD_LIMITED_ROOT = os.path.join( |
- os.path.dirname(BUILD_ROOT), 'build_internal', 'scripts', 'slave') |
- |
-PACKAGE_CFG = os.path.join( |
- os.path.dirname(os.path.dirname(SCRIPT_PATH)), |
- 'infra', 'config', 'recipes.cfg') |
- |
-if sys.platform.startswith('win'): |
- # TODO(pgervais): add windows support |
- # QQ: Where is infra/run.py on windows machines? |
- RUN_CMD = None |
-else: |
- RUN_CMD = os.path.join('/', 'opt', 'infra-python', 'run.py') |
- |
-@contextlib.contextmanager |
-def namedTempFile(): |
- fd, name = tempfile.mkstemp() |
- os.close(fd) # let the exceptions fly |
- try: |
- yield name |
- finally: |
+from gerrit_util import GceAuthenticator |
+ |
+SCRIPT_PATH = os.path.join(common.env.Build, 'scripts', 'slave') |
+BUILD_LIMITED_ROOT = os.path.join(common.env.BuildInternal, 'scripts', 'slave') |
+ |
+# Logging instance. |
+LOGGER = logging.getLogger('annotated_run') |
+ |
+# Return codes used by Butler/Annotee to indicate their failure (as opposed to |
+# a forwarded return code from the underlying process). |
+LOGDOG_ERROR_RETURNCODES = ( |
iannucci
2015/12/01 02:38:01
I think the logdog stuff should be in a different
|
+ # Butler runtime error. |
+ 250, |
+ # Annotee runtime error. |
+ 251, |
+) |
+ |
+# Whitelist of {master} => set of {builder} (or WHITELIST_ALL), whitelisting |
iannucci
2015/12/01 02:38:01
is {builder} a set?
dnj
2015/12/01 03:36:04
I was using that notation to mean "key", but I can
|
+# specific masters and builders for experimental LogDog/Annotee export. |
+LOGDOG_WHITELIST_MASTER_BUILDERS = { |
+} |
+ |
+# Sentinel value that, if present in master config, matches all builders |
+# underneath that master. |
+WHITELIST_ALL = '*' |
+ |
+# Configuration for a Pub/Sub topic. |
+PubSubConfig = collections.namedtuple('PubSubConfig', ('project', 'topic')) |
+ |
+# RecipeRuntime will probe this for values, keyed by platform tuple: |
+# - First, (system, processor) |
+# - Then, (system,) |
+# - Finally, () |
+PLATFORM_CONFIG = { |
+ # All systems. |
+ (): { |
+ 'logdog_pubsub': PubSubConfig( |
+ project='luci-logdog', |
+ topic='chrome-infra-beta', |
+ ), |
+ }, |
+ |
+ # Linux |
+ ('Linux',): { |
+ 'run_cmd': '/opt/infra-python/run.py', |
+ 'cipd_static_paths': ( |
+ # XXX: Get this right? |
+ '/opt/chrome-infra', |
+ ), |
+ 'credential_paths': ( |
+ # XXX: Get this right? |
+ '/opt/infra/service_accounts', |
+ ), |
+ 'logdog_butler_streamserver_gen': lambda rt: os.path.join(rt.workdir, |
+ 'butler.sock'), |
+ }, |
+ ('Linux', 'x86_64'): { |
+ 'logdog_cipd_packages': { |
+ 'infra/tools/luci/logdog/butler/linux-amd64': 'testing', |
+ 'infra/tools/luci/logdog/annotee/linux-amd64': 'testing', |
+ }, |
+ }, |
+ |
+ # Mac OSX |
+ ('Darwin',): { |
+ 'run_cmd': '/opt/infra-python/run.py', |
+ }, |
+ |
+ # Windows |
+ ('Windows',): {}, |
+} |
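As an illustration of the probe order, here is a minimal sketch (mirroring the merge loop in RecipeRuntime.enter below) of how a hypothetical 64-bit Linux bot resolves its configuration; later, more specific entries override earlier, general ones:

    # Hypothetical platform tuple for a 64-bit Linux bot.
    p = ('Linux', 'x86_64')
    fields = {}
    for i in xrange(len(p) + 1):
      # p[:0] == (), p[:1] == ('Linux',), p[:2] == ('Linux', 'x86_64')
      fields.update(PLATFORM_CONFIG.get(p[:i], {}))
    # fields now holds 'logdog_pubsub' (from ()); 'run_cmd',
    # 'cipd_static_paths', 'credential_paths', and
    # 'logdog_butler_streamserver_gen' (from ('Linux',)); and
    # 'logdog_cipd_packages' (from ('Linux', 'x86_64')).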
+ |
+ |
+class LogDogNotBootstrapped(Exception): |
+ pass |
+ |
+ |
+class LogDogBootstrapError(Exception): |
+ pass |
+ |
+ |
+def is_executable(path): |
+ return os.path.isfile(path) and os.access(path, os.X_OK) |
+ |
+ |
+def ensure_directory(*path): |
+ path = os.path.join(*path) |
+ if not os.path.isdir(path): |
+ os.makedirs(path) |
+ return path |
+ |
+ |
+def _run_command(cmd, **kwargs): |
+ dry_run = kwargs.pop('dry_run', False) |
+ |
+ LOGGER.debug('Executing command: %s', cmd) |
+ if dry_run: |
+ LOGGER.info('(Dry Run) Not executing command.') |
+ return 0, '' |
+ # Capture stdout (merging in stderr) and forward remaining kwargs, |
+ # e.g. 'cwd', to Popen. |
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, |
+ stderr=subprocess.STDOUT, **kwargs) |
+ stdout, _ = proc.communicate() |
+ |
+ LOGGER.debug('Process [%s] returned [%d] with output:\n%s', |
+ cmd, proc.returncode, stdout) |
+ return proc.returncode, stdout |
+ |
+ |
+def _check_command(*args, **kwargs): |
+ rv, stdout = _run_command(args, **kwargs) |
+ if rv != 0: |
+ raise ValueError('Process exited with non-zero return code (%d)' % (rv,)) |
+ return stdout |
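A brief usage sketch of these helpers (command and values illustrative): _run_command returns a (returncode, output) tuple and honors dry_run; _check_command takes the argv unpacked and raises ValueError on a non-zero exit:

    rv, out = _run_command(['echo', 'hello'], dry_run=False)  # => (0, 'hello\n')
    out = _check_command('echo', 'hello')  # raises ValueError on failure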
+ |
+ |
+class RecipeRuntime(object): |
+ """RecipeRuntime is the platform-specific runtime enviornment.""" |
+ |
+ _SENTINEL = object() |
+ |
+ def __init__(self, **kwargs): |
+ self._fields = kwargs |
+ |
+ @classmethod |
+ @contextlib.contextmanager |
+ def enter(cls, leak, **kw): |
iannucci
2015/12/01 02:38:01
what is leak? docstring?
dnj
2015/12/01 03:36:04
Done.
|
+ """Context manager yielding a RecipeRuntime for the current platform. |
+ |
+ Args: |
+ leak (bool): If True, leak the temporary working directory on exit |
+ instead of deleting it (useful for debugging). |
+ kw: Additional runtime fields, overriding the platform defaults. |
+ """ |
+ # Build our platform fields. |
+ p = (platform.system(), platform.processor()) |
+ fields = {} |
+ for i in xrange(len(p)+1): |
+ fields.update(PLATFORM_CONFIG.get(p[:i], {})) |
+ fields.update(kw) |
iannucci
2015/12/01 02:38:01
why not put this stuff in __init__? why is there `
dnj
2015/12/01 03:36:04
In general, I don't like __init__ doing actual wor
|
+ |
+ basedir = ensure_directory(os.getcwd(), '.recipe_runtime') |
try: |
- os.remove(name) |
- except OSError as e: |
- print >> sys.stderr, "LEAK: %s: %s" % (name, e) |
+ tdir = tempfile.mkdtemp(dir=basedir) |
+ LOGGER.debug('Using temporary directory [%s].', tdir) |
+ |
+ fields['workdir'] = tdir |
+ yield cls(**fields) |
+ finally: |
+ if basedir and os.path.isdir(basedir): |
+ if not leak: |
+ LOGGER.debug('Cleaning up temporary directory [%s].', basedir) |
+ try: |
+ # TODO(pgervais): use infra_libs.rmtree instead. |
+ shutil.rmtree(basedir) |
+ except Exception: |
+ LOGGER.exception('Failed to clean up temporary directory [%s].', |
+ basedir) |
+ else: |
+ LOGGER.warning('(--leak) Leaking temporary directory [%s].', basedir) |
+ |
+ def __getattr__(self, key): |
iannucci
2015/12/01 02:38:01
this seems unnecessarily fancy... why not just use
dnj
2015/12/01 03:36:04
I dunno, I think this usage looks way better.
|
+ # Class methods/variables. |
+ value = getattr(super(RecipeRuntime, self), key, self._SENTINEL) |
+ if value is not self._SENTINEL: |
+ return value |
+ |
+ value = getattr(self, 'get')(key, self._SENTINEL) |
+ if value is not self._SENTINEL: |
+ return value |
+ # __getattr__ must raise AttributeError so getattr()/hasattr() behave. |
+ raise AttributeError(key) |
+ |
+ def get(self, key, default=None): |
+ value = self._fields.get(key, self._SENTINEL) |
+ if value is not self._SENTINEL: |
+ return value |
+ return default |
+ |
+ def __str__(self): |
+ return str(self._fields) |
+ |
+ |
+class CIPD(object): |
iannucci
2015/12/01 02:38:01
separate file?
dnj
2015/12/01 03:36:04
I wasn't intending to make this a first-class libr
|
+ _CIPD_NAME = 'cipd' |
+ |
+ def __init__(self, path, root): |
+ self._cipd_path = path |
+ self._root = root |
+ |
+ @classmethod |
+ def find(cls, rt, rootdir): |
+ for p in itertools.chain( |
+ os.environ.get('PATH', '').split(os.pathsep), |
+ rt.get('cipd_static_paths', ())): |
+ candidate = os.path.join(p, cls._CIPD_NAME) |
+ if is_executable(candidate): |
+ return cls(candidate, rootdir) |
+ return None |
+ |
+ def __call__(self, *args): |
+ cmd = [self._cipd_path] |
+ cmd.extend(args) |
+ _check_command(*cmd) |
+ |
+ def path(self, *components): |
+ return os.path.join(self._root, *components) |
+ |
+ def ensure(self, **packages): |
+ if len(packages) == 0: |
+ return |
+ |
+ # Emit package list. |
+ package_list = self.path('package_list.txt') |
+ lines = [ |
+ '# Automatically generated CIPD package list (launcher.py)', |
+ '# Generated at: %s' % (datetime.datetime.now().isoformat(),), |
+ '', |
+ ] |
+ for pkg, version in sorted(packages.iteritems()): |
+ lines.append('%s %s' % (pkg, version)) |
+ |
+ ensure_directory(self._root) |
+ with open(package_list, 'w+') as fd: |
+ fd.write('\n'.join(lines)) |
+ |
+ # Ensure against the package list. |
+ args = [ |
+ 'ensure', |
+ '-root', self._root, |
+ '-list', package_list, |
+ ] |
+ self(*args) |
+ |
+ |
+def _get_service_account_json(rt, opts): |
+ """Returns (str/None): If specified, the path to the service account JSON. |
+ |
+ This method probes the local environemnt and returns a (possibly empty) list |
+ of arguments to add to the Butler command line for authentication. |
+ |
+ If we're running on a GCE instance, no arguments will be returned, as GCE |
+ service account is implicitly authenticated. If we're running on Baremetal, |
+ a path to those credentials will be returned. |
+ |
+ Args: |
+ rt (RecipeRuntime): The runtime environment. |
+ Raises: |
+ |LogDogBootstrapError| if no credentials could be found. |
+ """ |
+ # argparse.Namespace has no .get(); read the flag attribute directly. |
+ path = opts.logdog_service_account_json |
+ if path: |
+ return path |
+ |
+ if GceAuthenticator.is_gce(): |
+ LOGGER.info('Running on GCE. No credentials necessary.') |
+ return None |
+ |
+ for credential_path in rt.get('credential_paths', ()): |
+ candidate = os.path.join(credential_path, 'logdog_service_account.json') |
+ if os.path.isfile(candidate): |
+ return candidate |
+ |
+ raise LogDogBootstrapError('Could not find service account credentials.') |
+ |
+ |
+def _logdog_bootstrap(rt, opts, cmd): |
iannucci
2015/12/01 02:38:01
need docstrings for these suckers
dnj
2015/12/01 03:36:04
Done.
|
+ """Executes |cmd| bootstrapped through the LogDog Butler and Annotee. |
+ |
+ Returns (int): The return code of the bootstrapped process. |
+ """ |
+ bootstrap_dir = ensure_directory(rt.workdir, 'logdog_bootstrap') |
+ butler, annotee = opts.logdog_butler_path, opts.logdog_annotee_path |
+ if not (butler and annotee): |
+ # Load packages via CIPD. |
+ cipd = CIPD.find(rt, os.path.join(bootstrap_dir, 'cipd_root')) |
+ if rt.get('logdog_cipd_packages'): |
+ if not cipd: |
+ raise LogDogBootstrapError('Could not find CIPD binary.') |
+ cipd.ensure(**rt.logdog_cipd_packages) |
+ if not butler: |
+ butler = cipd.path('logdog_butler') |
+ if not annotee: |
+ annotee = cipd.path('logdog_annotee') |
+ |
+ if not is_executable(annotee): |
+ raise LogDogNotBootstrapped('Annotee is not executable: %s' % (annotee,)) |
+ if not is_executable(butler): |
+ raise LogDogNotBootstrapped('Butler is not executable: %s' % (butler,)) |
+ |
+ # Determine LogDog verbosity. |
+ logdog_verbose = [] |
+ if opts.logdog_verbose == 0: |
+ pass |
+ elif opts.logdog_verbose == 1: |
+ logdog_verbose.append('-log_level=info') |
+ else: |
+ logdog_verbose.append('-log_level=debug') |
+ |
+ service_account_args = [] |
+ service_account_json = _get_service_account_json(rt, opts) |
+ if service_account_json: |
+ service_account_args += ['-service-account-json', service_account_json] |
+ |
+ streamserver_uri_gen = rt.get('logdog_butler_streamserver_gen') |
+ if not streamserver_uri_gen: |
+ raise LogDogBootstrapError('No streamserver URI generator.') |
+ streamserver_uri = streamserver_uri_gen(rt) |
+ |
+ # Dump Annotee command to JSON. |
+ cmd_json = os.path.join(bootstrap_dir, 'annotee_cmd.json') |
+ with open(cmd_json, 'w') as fd: |
+ json.dump(cmd, fd) |
+ |
+ cmd = [ |
+ # Butler Command. |
+ butler, |
+ ] + logdog_verbose + service_account_args + [ |
+ '-output', 'gcps,project="%s",topic="%s"' % (rt.logdog_pubsub.project, |
+ rt.logdog_pubsub.topic), |
+ 'run', |
+ '-streamserver-uri', streamserver_uri, |
+ '--', |
+ |
+ # Annotee Command. |
+ annotee, |
+ ] + logdog_verbose + [ |
+ '-json-args-path', cmd_json, |
+ ] |
+ rv, _ = _run_command(cmd, dry_run=opts.dry_run) |
+ if rv in LOGDOG_ERROR_RETURNCODES: |
+ raise LogDogBootstrapError('LogDog Error (%d)' % (rv,)) |
+ return rv |
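Putting the pieces together, the assembled bootstrap command for a Linux bot with -V and explicit credentials would look roughly like this (paths illustrative):

    <butler> -log_level=info -service-account-json <creds.json> \
        -output 'gcps,project="luci-logdog",topic="chrome-infra-beta"' \
        run -streamserver-uri <workdir>/butler.sock -- \
        <annotee> -log_level=info \
        -json-args-path <workdir>/logdog_bootstrap/annotee_cmd.json

Butler runs Annotee, which in turn runs the recipe command recorded in annotee_cmd.json; exit codes 250/251 (LOGDOG_ERROR_RETURNCODES) distinguish Butler/Annotee failures from the recipe's own return code.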
+ |
+ |
+def _assert_logdog_whitelisted(rt): |
+ """Asserts that the runtime environment is whitelisted for LogDog bootstrap. |
+ |
+ Args: |
+ rt (RecipeRuntime): The runtime to test. |
+ Raises: |
+ LogDogNotBootstrapped: if the runtime is not whitelisted. |
+ """ |
+ mastername, buildername = rt.get('mastername'), rt.get('buildername') |
+ if not all((mastername, buildername)): |
+ raise LogDogNotBootstrapped('Required mastername/buildername is not set.') |
+ |
+ # Key on mastername. |
+ bdict = LOGDOG_WHITELIST_MASTER_BUILDERS.get(mastername) |
+ if bdict is not None: |
+ # Key on buildername. |
+ if WHITELIST_ALL in bdict or buildername in bdict: |
+ LOGGER.info('Whitelisted master %s, builder %s.', |
+ mastername, buildername) |
+ return |
+ raise LogDogNotBootstrapped('Master %s, builder %s is not whitelisted.' % ( |
+ mastername, buildername)) |
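For reference, once masters are opted in, entries in LOGDOG_WHITELIST_MASTER_BUILDERS could take either of these shapes (master/builder names hypothetical), since the lookup above only tests membership:

    LOGDOG_WHITELIST_MASTER_BUILDERS = {
      # Enable every builder under a master.
      'chromium.infra': {WHITELIST_ALL},
      # Enable specific builders only.
      'chromium.fyi': {'Linux Builder', 'Linux Tester'},
    }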
def get_recipe_properties(build_properties, use_factory_properties_from_disk): |
@@ -113,42 +437,40 @@ def get_recipe_properties(build_properties, use_factory_properties_from_disk): |
return properties |
-def get_factory_properties_from_disk(mastername, buildername): |
+def get_factory_properties_from_disk(rt): |
master_list = master_cfg_utils.GetMasters() |
master_path = None |
for name, path in master_list: |
- if name == mastername: |
+ if name == rt.mastername: |
master_path = path |
if not master_path: |
- raise LookupError('master "%s" not found.' % mastername) |
+ raise LookupError('master "%s" not found.' % rt.mastername) |
script_path = os.path.join(BUILD_ROOT, 'scripts', 'tools', |
'dump_master_cfg.py') |
- with namedTempFile() as fname: |
- dump_cmd = [sys.executable, |
- script_path, |
- master_path, fname] |
- proc = subprocess.Popen(dump_cmd, cwd=BUILD_ROOT, stdout=subprocess.PIPE, |
- stderr=subprocess.PIPE) |
- out, err = proc.communicate() |
- exit_code = proc.returncode |
- |
- if exit_code: |
- raise LookupError('Failed to get the master config; dump_master_cfg %s' |
- 'returned %d):\n%s\n%s\n'% ( |
- mastername, exit_code, out, err)) |
- |
- with open(fname, 'rU') as f: |
- config = json.load(f) |
+ master_json = os.path.join(rt.workdir, 'dump_master_cfg.json') |
+ dump_cmd = [sys.executable, |
+ script_path, |
+ master_path, master_json] |
+ proc = subprocess.Popen(dump_cmd, cwd=BUILD_ROOT, stdout=subprocess.PIPE, |
+ stderr=subprocess.PIPE) |
+ out, err = proc.communicate() |
+ if proc.returncode: |
+ raise LookupError('Failed to get the master config; dump_master_cfg %s ' |
+ 'returned %d:\n%s\n%s\n' % ( |
+ rt.mastername, proc.returncode, out, err)) |
+ |
+ with open(master_json, 'rU') as f: |
+ config = json.load(f) |
# Now extract just the factory properties for the requested builder |
# from the master config. |
props = {} |
found = False |
for builder_dict in config['builders']: |
- if builder_dict['name'] == buildername: |
+ if builder_dict['name'] == rt.buildername: |
found = True |
factory_properties = builder_dict['factory']['properties'] |
for name, (value, _) in factory_properties.items(): |
@@ -156,50 +478,67 @@ def get_factory_properties_from_disk(mastername, buildername): |
if not found: |
raise LookupError('builder "%s" not found on in master "%s"' % |
- (buildername, mastername)) |
+ (rt.buildername, rt.mastername)) |
if 'recipe' not in props: |
raise LookupError('Cannot find recipe for %s on %s' % |
- (buildername, mastername)) |
+ (rt.buildername, rt.mastername)) |
return props |
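The dump_master_cfg.py output parsed above is JSON with a top-level 'builders' list, where each factory property is a (value, source) pair and only the value is kept. A minimal sketch of the expected shape, inferred from the parsing code (names hypothetical):

    {
      "builders": [
        {
          "name": "Linux Builder",
          "factory": {
            "properties": {
              "recipe": ["chromium", "set in master.cfg"]
            }
          }
        }
      ]
    }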
def get_args(argv): |
"""Process command-line arguments.""" |
- |
- parser = optparse.OptionParser( |
+ parser = argparse.ArgumentParser( |
iannucci
2015/12/01 02:38:01
separate CL please
dnj
2015/12/01 03:36:04
mmk
|
description='Entry point for annotated builds.') |
- parser.add_option('--build-properties', |
- action='callback', callback=chromium_utils.convert_json, |
- type='string', default={}, |
- help='build properties in JSON format') |
- parser.add_option('--factory-properties', |
- action='callback', callback=chromium_utils.convert_json, |
- type='string', default={}, |
- help='factory properties in JSON format') |
- parser.add_option('--build-properties-gz', |
- action='callback', callback=chromium_utils.convert_gz_json, |
- type='string', default={}, dest='build_properties', |
- help='build properties in b64 gz JSON format') |
- parser.add_option('--factory-properties-gz', |
- action='callback', callback=chromium_utils.convert_gz_json, |
- type='string', default={}, dest='factory_properties', |
- help='factory properties in b64 gz JSON format') |
- parser.add_option('--keep-stdin', action='store_true', default=False, |
- help='don\'t close stdin when running recipe steps') |
- parser.add_option('--master-overrides-slave', action='store_true', |
- help='use the property values given on the command line ' |
- 'from the master, not the ones looked up on the slave') |
- parser.add_option('--use-factory-properties-from-disk', |
- action='store_true', default=False, |
- help='use factory properties loaded from disk on the slave') |
+ parser.add_argument('-v', '--verbose', |
+ action='count', default=0, |
+ help='Increase verbosity. This can be specified multiple times.') |
+ parser.add_argument('-d', '--dry-run', action='store_true', |
+ help='Perform the setup, but refrain from executing the recipe.') |
+ parser.add_argument('-l', '--leak', action='store_true', |
+ help="Refrain from cleaning up generated artifacts.") |
+ parser.add_argument('--build-properties', |
+ type=json.loads, default={}, |
+ help='build properties in JSON format') |
+ parser.add_argument('--factory-properties', |
+ type=json.loads, default={}, |
+ help='factory properties in JSON format') |
+ parser.add_argument('--build-properties-gz', dest='build_properties', |
+ type=chromium_utils.convert_gz_json_type, default={}, |
+ help='build properties in b64 gz JSON format') |
+ parser.add_argument('--factory-properties-gz', dest='factory_properties', |
+ type=chromium_utils.convert_gz_json_type, default={}, |
+ help='factory properties in b64 gz JSON format') |
+ parser.add_argument('--keep-stdin', action='store_true', default=False, |
+ help='don\'t close stdin when running recipe steps') |
+ parser.add_argument('--master-overrides-slave', action='store_true', |
+ help='use the property values given on the command line from the master, ' |
+ 'not the ones looked up on the slave') |
+ parser.add_argument('--use-factory-properties-from-disk', |
+ action='store_true', default=False, |
+ help='use factory properties loaded from disk on the slave') |
+ |
+ group = parser.add_argument_group('LogDog Bootstrap') |
+ group.add_argument('-V', '--logdog-verbose', |
+ action='count', default=0, |
+ help='Increase LogDog verbosity. This can be specified multiple times.') |
+ group.add_argument('-f', '--logdog-force', action='store_true', |
+ help='Force LogDog bootstrapping, even if the system is not configured.') |
+ group.add_argument('--logdog-butler-path', |
+ help='Path to the LogDog Butler. If empty, one will be probed/downloaded ' |
+ 'from CIPD.') |
+ group.add_argument('--logdog-annotee-path', |
+ help='Path to the LogDog Annotee. If empty, one will be ' |
+ 'probed/downloaded from CIPD.') |
+ group.add_argument('--logdog-service-account-json', |
+ help='Path to the service account JSON. If one is not provided, the ' |
+ 'local system credentials will be used.') |
return parser.parse_args(argv) |
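A hypothetical invocation combining build properties with the LogDog bootstrap options (all values illustrative):

    ./annotated_run.py -v \
        --build-properties '{"mastername": "chromium.fyi", "buildername": "Linux Tester", "recipe": "chromium"}' \
        --logdog-force -V \
        --logdog-service-account-json /creds/logdog_service_account.json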
def update_scripts(): |
- if os.environ.get('RUN_SLAVE_UPDATED_SCRIPTS'): |
- os.environ.pop('RUN_SLAVE_UPDATED_SCRIPTS') |
+ if os.environ.pop('RUN_SLAVE_UPDATED_SCRIPTS', None) is None: |
iannucci
2015/12/01 02:38:01
technically this is a semantic change: before it w
dnj
2015/12/01 03:36:04
Good point, I'll go ahead and revert this.
|
return False |
stream = annotator.StructuredAnnotationStream() |
@@ -223,7 +562,8 @@ def update_scripts(): |
'cwd': BUILD_ROOT, |
} |
annotator.print_step(cmd_dict, os.environ, stream) |
- if subprocess.call(gclient_cmd, cwd=BUILD_ROOT) != 0: |
+ rv, _ = _run_command(gclient_cmd, cwd=BUILD_ROOT) |
+ if rv != 0: |
s.step_text('gclient sync failed!') |
s.step_warnings() |
elif output_json: |
@@ -247,7 +587,7 @@ def update_scripts(): |
try: |
os.remove(output_json) |
except Exception as e: |
- print >> sys.stderr, "LEAKED:", output_json, e |
+ LOGGER.warning("LEAKED: %s", output_json, exc_info=True) |
else: |
s.step_text('Unable to get SCM data') |
s.step_warnings() |
@@ -271,79 +611,56 @@ def clean_old_recipe_engine(): |
os.path.join(BUILD_ROOT, 'third_party', 'recipe_engine')): |
for filename in filenames: |
if filename.endswith('.pyc'): |
- path = os.path.join(dirpath, filename) |
- os.remove(path) |
- |
+ os.remove(os.path.join(dirpath, filename)) |
-@contextlib.contextmanager |
-def build_data_directory(): |
- """Context manager that creates a build-specific directory. |
- The directory is wiped when exiting. |
+def write_monitoring_event(rt, outdir): |
+ # Use rt.get() so platforms without a configured 'run_cmd' (e.g., Windows) |
+ # skip event reporting instead of raising. |
+ run_cmd = rt.get('run_cmd') |
+ if not (run_cmd and os.path.exists(run_cmd)): |
+ LOGGER.warning('Unable to find run.py at %s, no events will be sent.', |
+ run_cmd) |
+ return |
- Yields: |
- build_data (str or None): full path to a writeable directory. Return None if |
- no directory can be found or if it's not writeable. |
- """ |
- prefix = 'build_data' |
- |
- # TODO(pgervais): import that from infra_libs.logs instead |
- if sys.platform.startswith('win'): # pragma: no cover |
- DEFAULT_LOG_DIRECTORIES = [ |
- 'E:\\chrome-infra-logs', |
- 'C:\\chrome-infra-logs', |
- ] |
+ hostname = socket.getfqdn() |
+ if hostname: # just in case getfqdn() returns None. |
+ hostname = hostname.split('.')[0] |
else: |
- DEFAULT_LOG_DIRECTORIES = ['/var/log/chrome-infra'] |
- |
- build_data_dir = None |
- for candidate in DEFAULT_LOG_DIRECTORIES: |
- if os.path.isdir(candidate): |
- build_data_dir = os.path.join(candidate, prefix) |
- break |
- |
- # Remove any leftovers and recreate the dir. |
- if build_data_dir: |
- print >> sys.stderr, "Creating directory" |
- # TODO(pgervais): use infra_libs.rmtree instead. |
- if os.path.exists(build_data_dir): |
- try: |
- shutil.rmtree(build_data_dir) |
- except Exception as exc: |
- # Catching everything: we don't want to break any builds for that reason |
- print >> sys.stderr, ( |
- "FAILURE: path can't be deleted: %s.\n%s" % (build_data_dir, str(exc)) |
- ) |
- print >> sys.stderr, "Creating directory" |
- |
- if not os.path.exists(build_data_dir): |
- try: |
- os.mkdir(build_data_dir) |
- except Exception as exc: |
- print >> sys.stderr, ( |
- "FAILURE: directory can't be created: %s.\n%s" % |
- (build_data_dir, str(exc)) |
- ) |
- build_data_dir = None |
- |
- # Under this line build_data_dir should point to an existing empty dir |
- # or be None. |
- yield build_data_dir |
- |
- # Clean up after ourselves |
- if build_data_dir: |
- # TODO(pgervais): use infra_libs.rmtree instead. |
- try: |
- shutil.rmtree(build_data_dir) |
- except Exception as exc: |
- # Catching everything: we don't want to break any builds for that reason. |
- print >> sys.stderr, ( |
- "FAILURE: path can't be deleted: %s.\n%s" % (build_data_dir, str(exc)) |
- ) |
+ hostname = None |
+ |
+ try: |
+ cmd = [run_cmd, 'infra.tools.send_monitoring_event', |
+ '--event-mon-output-file', |
+ os.path.join(outdir, 'log_request_proto'), |
+ '--event-mon-run-type', 'file', |
+ '--event-mon-service-name', |
+ 'buildbot/master/master.%s' |
+ % rt.get('mastername', 'UNKNOWN'), |
+ '--build-event-build-name', |
+ rt.get('buildername', 'UNKNOWN'), |
+ '--build-event-build-number', |
+ str(rt.get('buildnumber', 0)), |
+ '--build-event-build-scheduling-time', |
+ str(1000*int(rt.get('requestedAt', 0))), |
+ '--build-event-type', 'BUILD', |
+ '--event-mon-timestamp-kind', 'POINT', |
+ # And use only defaults for credentials. |
+ ] |
+ # Add this conditionally so that we get an error in |
+ # send_monitoring_event log files in case it isn't present. |
+ if hostname: |
+ cmd += ['--build-event-hostname', hostname] |
+ _check_command(*cmd) |
+ except Exception: |
+ LOGGER.warning("Failed to send monitoring event.", exc_info=True) |
def main(argv): |
- opts, _ = get_args(argv) |
+ opts = get_args(argv) |
+ if opts.verbose == 0: |
+ level = logging.INFO |
+ else: |
+ level = logging.DEBUG |
+ logging.getLogger().setLevel(level) |
+ |
# TODO(crbug.com/551165): remove flag "factory_properties". |
use_factory_properties_from_disk = (opts.use_factory_properties_from_disk or |
bool(opts.factory_properties)) |
@@ -352,78 +669,67 @@ def main(argv): |
clean_old_recipe_engine() |
- # Find out if the recipe we intend to run is in build_internal's recipes. If |
- # so, use recipes.py from there, otherwise use the one from build. |
- recipe_file = properties['recipe'].replace('/', os.path.sep) + '.py' |
- if os.path.exists(os.path.join(BUILD_LIMITED_ROOT, 'recipes', recipe_file)): |
- recipe_runner = os.path.join(BUILD_LIMITED_ROOT, 'recipes.py') |
- else: |
- recipe_runner = os.path.join(SCRIPT_PATH, 'recipes.py') |
+ # Enter our runtime environment. |
+ with RecipeRuntime.enter(opts.leak, **properties) as rt: |
+ LOGGER.debug('Loaded runtime: %s', rt) |
- with build_data_directory() as build_data_dir: |
- # Create a LogRequestLite proto containing this build's information. |
- if build_data_dir: |
- properties['build_data_dir'] = build_data_dir |
+ # Find out if the recipe we intend to run is in build_internal's recipes. If |
+ # so, use recipes.py from there, otherwise use the one from build. |
+ recipe_file = properties['recipe'].replace('/', os.path.sep) + '.py' |
+ if os.path.exists(os.path.join(BUILD_LIMITED_ROOT, 'recipes', recipe_file)): |
+ recipe_runner = os.path.join(BUILD_LIMITED_ROOT, 'recipes.py') |
+ else: |
+ recipe_runner = os.path.join(SCRIPT_PATH, 'recipes.py') |
+ |
+ # Setup monitoring directory and send a monitoring event. |
+ build_data_dir = ensure_directory(rt.workdir, 'build_data') |
+ properties['build_data_dir'] = build_data_dir |
+ |
+ # Write our annotated_run.py monitoring event. |
+ write_monitoring_event(rt, build_data_dir) |
+ |
+ # Dump properties to JSON and build recipe command. |
+ props_file = os.path.join(rt.workdir, 'recipe_properties.json') |
+ with open(props_file, 'w') as fh: |
+ json.dump(properties, fh) |
+ cmd = [ |
+ sys.executable, '-u', recipe_runner, |
+ 'run', |
+ '--workdir=%s' % os.getcwd(), |
+ '--properties-file=%s' % props_file, |
+ rt.recipe, |
+ ] |
- hostname = socket.getfqdn() |
- if hostname: # just in case getfqdn() returns None. |
- hostname = hostname.split('.')[0] |
- else: |
- hostname = None |
+ status = None |
+ try: |
+ if not opts.logdog_force: |
+ _assert_logdog_whitelisted(rt) |
iannucci
2015/12/01 02:38:01
why not just have this return a status code? I don
dnj
2015/12/01 03:36:04
I chose exceptions here because I'm already surrou
|
+ |
+ status = _logdog_bootstrap(rt, opts, cmd) |
iannucci
2015/12/01 02:38:01
So IIUC, on windows this will now look like
annot
dnj
2015/12/01 03:36:04
It should be:
annotated_run.py
annotated_run.py
|
+ except LogDogNotBootstrapped as e: |
+ LOGGER.info('Not bootstrapped: %s', e.message) |
+ except LogDogBootstrapError as e: |
+ LOGGER.warning('Could not bootstrap LogDog: %s', e.message) |
+ except Exception: |
+ LOGGER.exception('Exception while bootstrapping LogDog.') |
+ finally: |
+ if status is None: |
+ LOGGER.info('Not using LogDog. Invoking recipe runner directly.') |
+ status, _ = _run_command(cmd, dry_run=opts.dry_run) |
- if RUN_CMD and os.path.exists(RUN_CMD): |
- try: |
- cmd = [RUN_CMD, 'infra.tools.send_monitoring_event', |
- '--event-mon-output-file', |
- os.path.join(build_data_dir, 'log_request_proto'), |
- '--event-mon-run-type', 'file', |
- '--event-mon-service-name', |
- 'buildbot/master/master.%s' |
- % properties.get('mastername', 'UNKNOWN'), |
- '--build-event-build-name', |
- properties.get('buildername', 'UNKNOWN'), |
- '--build-event-build-number', |
- str(properties.get('buildnumber', 0)), |
- '--build-event-build-scheduling-time', |
- str(1000*int(properties.get('requestedAt', 0))), |
- '--build-event-type', 'BUILD', |
- '--event-mon-timestamp-kind', 'POINT', |
- # And use only defaults for credentials. |
- ] |
- # Add this conditionally so that we get an error in |
- # send_monitoring_event log files in case it isn't present. |
- if hostname: |
- cmd += ['--build-event-hostname', hostname] |
- subprocess.call(cmd) |
- except Exception: |
- print >> sys.stderr, traceback.format_exc() |
- |
- else: |
- print >> sys.stderr, ( |
- 'WARNING: Unable to find run.py at %r, no events will be sent.' |
- % str(RUN_CMD) |
- ) |
- |
- with namedTempFile() as props_file: |
- with open(props_file, 'w') as fh: |
- fh.write(json.dumps(properties)) |
- cmd = [ |
- sys.executable, '-u', recipe_runner, |
- 'run', |
- '--workdir=%s' % os.getcwd(), |
- '--properties-file=%s' % props_file, |
- properties['recipe'] ] |
- status = subprocess.call(cmd) |
- |
- # TODO(pgervais): Send events from build_data_dir to the endpoint. |
+ # TODO(pgervais): Send events from build_data_dir to the endpoint. |
return status |
+ |
def shell_main(argv): |
if update_scripts(): |
- return subprocess.call([sys.executable] + argv) |
+ # Re-execute with the updated annotated_run.py. |
+ rv, _ = _run_command([sys.executable] + argv) |
+ return rv |
else: |
- return main(argv) |
+ return main(argv[1:]) |
if __name__ == '__main__': |
+ logging.basicConfig(level=logging.INFO) |
sys.exit(shell_main(sys.argv)) |