Chromium Code Reviews

Unified Diff: tools/deep_memory_profiler/dmprof.py

Issue 11417048: Retry: Add a first test for tools/deep_memory_profiler. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Created 8 years, 1 month ago
Index: tools/deep_memory_profiler/dmprof.py
diff --git a/tools/deep_memory_profiler/dmprof b/tools/deep_memory_profiler/dmprof.py
old mode 100755
new mode 100644
similarity index 94%
copy from tools/deep_memory_profiler/dmprof
copy to tools/deep_memory_profiler/dmprof.py
index 1951838c4f75571e8c5f5bb967f9f83c23bc2ed6..7c14b80a3b71b8e25e5b47cf199c61cdb20a77ff
--- a/tools/deep_memory_profiler/dmprof
+++ b/tools/deep_memory_profiler/dmprof.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
M-A Ruel 2012/11/17 23:23:10 Why are you removing the shebang and removing the executable bit?
Dai Mikurube (NOT FULLTIME) 2012/11/18 06:01:54 It's because now dmprof can be executed by the she…
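The point of dropping the shebang and the executable bit is presumably that dmprof.py is now imported as a module (for instance by the new tests/dmprof_test.py in this CL) rather than run directly. A minimal sketch of that import pattern; the sys.path handling here is an assumption, not taken from the patch:

    # tests/dmprof_test.py (sketch only; the real test file is added in this CL)
    import os
    import sys

    # Assumption: the test adds the tool directory to sys.path itself.
    BASE_PATH = os.path.dirname(os.path.abspath(__file__))
    sys.path.append(os.path.join(BASE_PATH, os.pardir))

    import dmprof  # importable now that the tool is a plain .py module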
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -11,10 +10,7 @@ import logging
import optparse
import os
import re
-import shutil
-import subprocess
import sys
-import tempfile
BASE_PATH = os.path.dirname(os.path.abspath(__file__))
FIND_RUNTIME_SYMBOLS_PATH = os.path.join(
@@ -79,6 +75,7 @@ POLICY_DEEP_4 = 'POLICY_DEEP_4'
class EmptyDumpException(Exception):
def __init__(self, value):
+ super(EmptyDumpException, self).__init__()
self.value = value
def __str__(self):
return repr(self.value)
@@ -86,6 +83,7 @@ class EmptyDumpException(Exception):
class ParsingException(Exception):
def __init__(self, value):
+ super(ParsingException, self).__init__()
self.value = value
def __str__(self):
return repr(self.value)
@@ -93,6 +91,7 @@ class ParsingException(Exception):
class InvalidDumpException(ParsingException):
def __init__(self, value):
+ super(InvalidDumpException, self).__init__()
self.value = value
def __str__(self):
return "invalid heap profile dump: %s" % repr(self.value)
@@ -100,6 +99,7 @@ class InvalidDumpException(ParsingException):
class ObsoleteDumpVersionException(ParsingException):
def __init__(self, value):
+ super(ObsoleteDumpVersionException, self).__init__()
self.value = value
def __str__(self):
return "obsolete heap profile dump version: %s" % repr(self.value)
@@ -268,7 +268,7 @@ class SymbolCache(object):
LOGGER.info('Loaded %d entries from symbol cache.' %
len(self._symbol_caches[address_type]))
except IOError as e:
- LOGGER.info('No valid symbol cache file is found.')
+ LOGGER.info('No valid symbol cache file is found: %s' % e)
class Rule(object):
@@ -349,33 +349,33 @@ class Policy(object):
assert False
@staticmethod
- def load(filename, format):
+ def load(filename, filetype):
"""Loads a policy file of |filename| in a |format|.
Args:
filename: A filename to be loaded.
- format: A string to specify a format of the file. Only 'json' is
+ filetype: A string to specify a type of the file. Only 'json' is
supported for now.
Returns:
A loaded Policy object.
"""
with open(os.path.join(BASE_PATH, filename)) as policy_f:
- return Policy.parse(policy_f, format)
+ return Policy.parse(policy_f, filetype)
@staticmethod
- def parse(policy_f, format):
+ def parse(policy_f, filetype):
"""Parses a policy file content in a |format|.
Args:
policy_f: An IO object to be loaded.
- format: A string to specify a format of the file. Only 'json' is
+ filetype: A string to specify a type of the file. Only 'json' is
supported for now.
Returns:
A loaded Policy object.
"""
- if format == 'json':
+ if filetype == 'json':
return Policy._parse_json(policy_f)
else:
return None
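The format-to-filetype rename also stops shadowing the format builtin, and Policy.parse still accepts any IO object and falls through to None for unsupported types, so it can be exercised without touching the file system. A minimal sketch:

    import StringIO  # Python 2, matching the rest of the tool

    # Anything other than 'json' currently yields None rather than raising.
    assert Policy.parse(StringIO.StringIO('{}'), 'yaml') is None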
@@ -543,18 +543,13 @@ class BucketSet(object):
TYPEINFO_ADDRESS: set(),
}
- @staticmethod
- def load(prefix):
+ def load(self, prefix):
"""Loads all related bucket files.
Args:
prefix: A prefix string for bucket file names.
-
- Returns:
- A loaded BucketSet object.
"""
LOGGER.info('Loading bucket files.')
- bucket_set = BucketSet()
n = 0
while True:
@@ -566,11 +561,9 @@ class BucketSet(object):
continue
LOGGER.info(' %s' % path)
with open(path, 'r') as f:
- bucket_set._load_file(f)
+ self._load_file(f)
n += 1
- return bucket_set
-
def _load_file(self, bucket_f):
for line in bucket_f:
words = line.split()
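Making load an instance method means call sites construct the BucketSet first and then populate it, as shown in the new call pattern below; _load_file can likewise be fed an in-memory buffer in a test instead of real bucket files:

    # Construct first, then populate; this is the pattern the call sites now use.
    bucket_set = BucketSet()
    bucket_set.load(prefix)  # |prefix| names the on-disk bucket files, as at the call site

A test could instead call bucket_set._load_file(f) with a StringIO object holding bucket lines, avoiding the file system entirely.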
@@ -620,9 +613,9 @@ class BucketSet(object):
class Dump(object):
"""Represents a heap profile dump."""
- def __init__(self):
- self._path = ''
- self._time = None
+ def __init__(self, path, time):
+ self._path = path
+ self._time = time
self._stacktrace_lines = []
self._global_stats = {} # used only in apply_policy
@@ -659,27 +652,26 @@ class Dump(object):
Raises:
ParsingException for invalid heap profile dumps.
"""
- dump = Dump()
- dump._path = path
- dump._time = os.stat(dump._path).st_mtime
- dump._version = ''
+ dump = Dump(path, os.stat(path).st_mtime)
+ with open(path, 'r') as f:
+ dump.load_file(f, log_header)
+ return dump
- dump._lines = [line for line in open(dump._path, 'r')
+ def load_file(self, f, log_header):
+ self._lines = [line for line in f
if line and not line.startswith('#')]
try:
- dump._version, ln = dump._parse_version()
- dump._parse_global_stats()
- dump._extract_stacktrace_lines(ln)
+ self._version, ln = self._parse_version()
+ self._parse_global_stats()
+ self._extract_stacktrace_lines(ln)
except EmptyDumpException:
- LOGGER.info('%s%s ...ignored an empty dump.' % (log_header, path))
+ LOGGER.info('%s%s ...ignored an empty dump.' % (log_header, self._path))
except ParsingException, e:
- LOGGER.error('%s%s ...error %s' % (log_header, path, e))
+ LOGGER.error('%s%s ...error %s' % (log_header, self._path, e))
raise
else:
- LOGGER.info('%s%s (version: %s)' % (log_header, path, dump._version))
-
- return dump
+ LOGGER.info('%s%s (version:%s)' % (log_header, self._path, self._version))
def _parse_version(self):
"""Parses a version string in self._lines.
@@ -825,7 +817,8 @@ class Command(object):
prefix = Command._find_prefix(dump_path)
symbol_mapping = SymbolMapping(prefix)
symbol_mapping.prepare()
- bucket_set = BucketSet.load(prefix)
+ bucket_set = BucketSet()
+ bucket_set.load(prefix)
if multiple:
dump_list = DumpList.load(Command._find_all_dumps(dump_path))
else:
@@ -867,7 +860,8 @@ class Command(object):
return None
return (options, args)
- def _parse_policy_list(self, options_policy):
+ @staticmethod
+ def _parse_policy_list(options_policy):
if options_policy:
return options_policy.split(',')
else:
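Now that _parse_policy_list is a staticmethod, it can be unit-tested without instantiating a command object, for example:

    assert Command._parse_policy_list('l0,l1,l2') == ['l0', 'l1', 'l2']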
@@ -880,7 +874,7 @@ class StacktraceCommand(Command):
'Usage: %prog stacktrace <dump>')
def do(self, sys_argv):
- options, args = self._parse_args(sys_argv, 1)
+ _, args = self._parse_args(sys_argv, 1)
dump_path = args[1]
(bucket_set, dump) = Command.load_basic_files(dump_path, False)
@@ -919,10 +913,11 @@ class PolicyCommands(Command):
dump_path = args[1]
(bucket_set, dumps) = Command.load_basic_files(dump_path, True)
- policy_set = PolicySet.load(self._parse_policy_list(options.policy))
+ policy_set = PolicySet.load(Command._parse_policy_list(options.policy))
return policy_set, dumps, bucket_set
- def _apply_policy(self, dump, policy, bucket_set, first_dump_time):
+ @staticmethod
+ def _apply_policy(dump, policy, bucket_set, first_dump_time):
"""Aggregates the total memory size of each component.
Iterate through all stacktraces and attribute them to one of the components
@@ -1026,9 +1021,10 @@ class CSVCommand(PolicyCommands):
def do(self, sys_argv):
policy_set, dumps, bucket_set = self._set_up(sys_argv)
- return self._output(policy_set, dumps, bucket_set, sys.stdout)
+ return CSVCommand._output(policy_set, dumps, bucket_set, sys.stdout)
- def _output(self, policy_set, dumps, bucket_set, out):
+ @staticmethod
+ def _output(policy_set, dumps, bucket_set, out):
max_components = 0
for label in policy_set:
max_components = max(max_components, len(policy_set[label].components))
@@ -1042,7 +1038,7 @@ class CSVCommand(PolicyCommands):
LOGGER.info('Applying a policy %s to...' % label)
for dump in dumps:
- component_sizes = self._apply_policy(
+ component_sizes = PolicyCommands._apply_policy(
dump, policy_set[label], bucket_set, dumps[0].time)
s = []
for c in components:
@@ -1064,9 +1060,10 @@ class JSONCommand(PolicyCommands):
def do(self, sys_argv):
policy_set, dumps, bucket_set = self._set_up(sys_argv)
- return self._output(policy_set, dumps, bucket_set, sys.stdout)
+ return JSONCommand._output(policy_set, dumps, bucket_set, sys.stdout)
- def _output(self, policy_set, dumps, bucket_set, out):
+ @staticmethod
+ def _output(policy_set, dumps, bucket_set, out):
json_base = {
'version': 'JSON_DEEP_2',
'policies': {},
@@ -1080,7 +1077,7 @@ class JSONCommand(PolicyCommands):
LOGGER.info('Applying a policy %s to...' % label)
for dump in dumps:
- component_sizes = self._apply_policy(
+ component_sizes = PolicyCommands._apply_policy(
dump, policy_set[label], bucket_set, dumps[0].time)
component_sizes['dump_path'] = dump.path
component_sizes['dump_time'] = datetime.fromtimestamp(
@@ -1100,13 +1097,14 @@ class ListCommand(PolicyCommands):
def do(self, sys_argv):
policy_set, dumps, bucket_set = self._set_up(sys_argv)
- return self._output(policy_set, dumps, bucket_set, sys.stdout)
+ return ListCommand._output(policy_set, dumps, bucket_set, sys.stdout)
- def _output(self, policy_set, dumps, bucket_set, out):
+ @staticmethod
+ def _output(policy_set, dumps, bucket_set, out):
for label in sorted(policy_set):
LOGGER.info('Applying a policy %s to...' % label)
for dump in dumps:
- component_sizes = self._apply_policy(
+ component_sizes = PolicyCommands._apply_policy(
dump, policy_set[label], bucket_set, dump.time)
out.write('%s for %s:\n' % (label, dump.path))
for c in policy_set[label].components:
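With every _output now a staticmethod that writes to an explicit stream, a test can capture the report in memory rather than patching sys.stdout. A minimal sketch; policy_set, dumps and bucket_set are placeholders that would come from test fixtures:

    import StringIO  # Python 2, matching the rest of the tool

    out = StringIO.StringIO()
    # policy_set, dumps, bucket_set: assumed to be built from test fixtures.
    ListCommand._output(policy_set, dumps, bucket_set, out)
    report = out.getvalue()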
@@ -1126,19 +1124,20 @@ class ExpandCommand(Command):
'Usage: %prog expand <dump> <policy> <component> <depth>')
def do(self, sys_argv):
- options, args = self._parse_args(sys_argv, 4)
+ _, args = self._parse_args(sys_argv, 4)
dump_path = args[1]
target_policy = args[2]
component_name = args[3]
depth = args[4]
(bucket_set, dump) = Command.load_basic_files(dump_path, False)
- policy_set = PolicySet.load(self._parse_policy_list(target_policy))
+ policy_set = PolicySet.load(Command._parse_policy_list(target_policy))
- self._output(dump, policy_set[target_policy], bucket_set,
- component_name, int(depth), sys.stdout)
+ ExpandCommand._output(dump, policy_set[target_policy], bucket_set,
+ component_name, int(depth), sys.stdout)
return 0
- def _output(self, dump, policy, bucket_set, component_name, depth, out):
+ @staticmethod
+ def _output(dump, policy, bucket_set, component_name, depth, out):
"""Prints all stacktraces in a given component of given depth.
Args:
@@ -1197,7 +1196,7 @@ class PProfCommand(Command):
component = options.component
(bucket_set, dump) = Command.load_basic_files(dump_path, False)
- policy_set = PolicySet.load(self._parse_policy_list(target_policy))
+ policy_set = PolicySet.load(Command._parse_policy_list(target_policy))
with open(Command._find_prefix(dump_path) + '.maps', 'r') as maps_f:
maps_lines = maps_f.readlines()
@@ -1300,7 +1299,7 @@ def main():
}
if len(sys.argv) < 2 or (not sys.argv[1] in COMMANDS):
- sys.stderr.write("""Usage: %s <command> [options] [<args>]
+ sys.stderr.write("""Usage: dmprof <command> [options] [<args>]
Commands:
csv Classify memory usage in CSV
@@ -1317,7 +1316,7 @@ Quick Reference:
dmprof list [-p POLICY] <first-dump>
dmprof pprof [-c COMPONENT] <dump> <policy>
dmprof stacktrace <dump>
-""" % (sys.argv[0]))
+""")
sys.exit(1)
action = sys.argv.pop(1)