Chromium Code Reviews

Index: presubmit_canned_checks.py
diff --git a/presubmit_canned_checks.py b/presubmit_canned_checks.py
index 9179fb6ee35436d654ecae91ed4e02692514d0b3..df6159664f05c4825d86ef737b10a3ac36511df7 100644
--- a/presubmit_canned_checks.py
+++ b/presubmit_canned_checks.py
@@ -788,17 +788,24 @@ def GetPylint(input_api, output_api, white_list=None, black_list=None,
         kwargs={'env': env, 'stdin': '\n'.join(files + extra_args)},
         message=error_type)
-  # Always run pylint and pass it all the py files at once.
-  # Passing py files one at time is slower and can produce
-  # different results. input_api.verbose used to be used
-  # to enable this behaviour but differing behaviour in
-  # verbose mode is not desirable.
-  # Leave this unreachable code in here so users can make
-  # a quick local edit to diagnose pylint issues more
-  # easily.
+  # Always run pylint and pass it all the (sharded) py files at once. Passing py
+  # files one at time is slower and can produce different results.
+  # input_api.verbose used to be used to enable this behaviour but differing
+  # behaviour in verbose mode is not desirable.
+  #
+  # We shard the pylint invocations by the number of CPUs, since they tend to
+  # saturate a CPU entirely (but never more than 100%, thanks to the GIL).
   if True:
-    return [GetPylintCmd(files)]
+    # number of files to allow in one command
+    limit = max(len(files) / input_api.cpu_count, 1)
+    ret = []
+    while files:
+      ret.append(GetPylintCmd(files[:limit]))
+      files = files[limit:]

nednguyen
2015/06/12 18:06:14
If we shard the files arbitrarily, how does this a

+    return ret
   else:
+    # Leave this unreachable code in here so users can make a quick local edit
+    # to diagnose pylint issues more easily.
     return map(lambda x: GetPylintCmd([x]), files)
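
For readers skimming this change, the sharding strategy introduced in the new hunk can be sketched in isolation. The snippet below is a minimal illustration rather than depot_tools code: make_command stands in for GetPylintCmd, cpu_count for input_api.cpu_count, and it uses floor division (//) so its behaviour matches the Python 2 integer division the CL relies on.

def shard_commands(files, cpu_count, make_command):
  """Split files into roughly cpu_count batches and build one command each.

  Illustrative sketch of the hunk above; make_command is a stand-in for
  GetPylintCmd and is assumed to accept a list of file paths.
  """
  # Number of files to allow in one command; never less than one.
  limit = max(len(files) // cpu_count, 1)
  commands = []
  while files:
    commands.append(make_command(files[:limit]))
    files = files[limit:]
  return commands

# Example: 10 files on a 4-core machine give a limit of 2 files per command,
# so 5 commands are produced; because the limit is rounded down, the number
# of shards can slightly exceed the CPU count.
shards = shard_commands(['f%d.py' % i for i in range(10)], 4, list)
assert len(shards) == 5

As the new comment in the hunk explains, sharding pays off because each pylint invocation is a separate process that is effectively limited to one core by the GIL, so splitting the file list lets the calling presubmit machinery run the resulting commands on multiple CPUs in parallel.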