Chromium Code Reviews

Unified Diff: merger.py

Issue 12178026: Adds console renderer to merger backend. (Closed)
Base URL: svn://svn.chromium.org/chrome/trunk/tools/chromium-build
Patch Set: Responding to comments. Created 7 years, 10 months ago
@@ -1,48 +1,56 @@
 # Copyright (c) 2012 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

 import datetime
 import logging
+import jinja2
 import webapp2

 import app
 import base_page
 import utils

+from third_party.BeautifulSoup.BeautifulSoup import BeautifulSoup

 class BuildData(object):
   """Represents a single build in the waterfall.

   Not yet used (this backend only renders a console so far).
   TODO(agable): Use, and include step-level info.
   """

   STATUS_ENUM = (
       'notstarted',
       'running',
       'success',
       'warnings',
       'failure',
       'exception',
   )

   def __init__(self):
     self.status = 0


 class RowData(object):
   """Represents a single row of the console.

   Includes all individual builder statuses.
   """

   def __init__(self):
     self.revision = 0
+    self.revlink = None
     self.committer = None
     self.comment = None
     self.details = None
+    # Per-builder status stored at self.status[master][category][builder].
     self.status = {}
     self.timestamp = datetime.datetime.now()

+  def purge_unicode(self, enc='ascii', err='replace'):
+    self.committer = self.committer.encode(enc, err)
+    self.comment = self.comment.encode(enc, err)
+    self.details = self.details.encode(enc, err)

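Note: the status attribute added to RowData above ends up as a three-level dictionary keyed by master, then category, then builder, with each leaf holding the raw status link scraped from the console HTML (see update_status() further down). A minimal sketch of that shape; the category and builder names and the link snippet are invented for illustration:

# Illustrative only: category/builder keys and the <a> snippet are made up.
status = {
    'chromium.main': {                  # master
        'default': {                    # category
            'builder-a':                # builder
                u'<a class="DevStatusBox success" href="#" title="builder-a">4242</a>',
        },
    },
}
assert 'success' in status['chromium.main']['default']['builder-a']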
@@ -49,51 +57,129 @@
 class MergerData(object):
   """Persistent data storage class.

   Holds all of the data we have about the last 100 revisions.
   Keeps it organized and can render it upon request.
   """

   def __init__(self):
     self.SIZE = 100
+    # Straight list of masters to display.
+    self.ordered_masters = app.DEFAULT_MASTERS_TO_MERGE
+    # Ordered categories, indexed by master.
+    self.ordered_categories = {}
+    # Ordered builders, indexed by master and category.
+    self.ordered_builders = {}
+    self.latest_rev = 0
     self.rows = {}
-    self.latest_rev = 0
-    self.ordered_builders = []
+    self.status = {}
     self.failures = {}

   def bootstrap(self):
     """Fills an empty MergerData with 100 rows of data."""
+    # Populate the categories, masters, status, and failures data.
+    for m in self.ordered_masters:
+      for d in (self.ordered_builders,
+                self.ordered_categories,
+                self.status,
+                self.failures):
+        d.setdefault(m, {})
+      # Get the category data and construct the list of categories
+      # for this master.
+      category_data = app.get_and_cache_pagedata('%s/console/categories' % m)
+      if not category_data['content']:
+        category_list = [u'default']
+      else:
+        category_soup = BeautifulSoup(category_data['content'])
+        category_list = [tag.string.strip() for tag in
+                         category_soup.findAll('td', 'DevStatus')]
+      self.ordered_categories[m] = category_list
+      # Get the builder status data.
+      builder_data = app.get_and_cache_pagedata('%s/console/summary' % m)
+      if not builder_data['content']:
+        continue
+      builder_soup = BeautifulSoup(builder_data['content'])
+      builders_by_category = builder_soup.tr.findAll('td', 'DevSlave',
                                                      recursive=False)
+      # Construct the list of builders for this category.
+      for i, c in enumerate(self.ordered_categories[m]):
+        self.ordered_builders[m].setdefault(c, {})
+        builder_list = [tag['title'] for tag in
+                        builders_by_category[i].findAll('a', 'DevSlaveBox')]
+        self.ordered_builders[m][c] = builder_list
+      # Fill in the status data for all of this master's builders.
+      update_status(m, builder_data['content'], self.status)
+      # Copy that status data over into the failures dictionary too.
+      for c in self.ordered_categories[m]:
+        self.failures[m].setdefault(c, {})
+        for b in self.ordered_builders[m][c]:
+          if self.status[m][c][b] not in ('success', 'running', 'notstarted'):
+            self.failures[m][c][b] = True
+          else:
+            self.failures[m][c][b] = False
+    # Populate the individual row data, saving status info in the same
+    # master/category/builder tree format constructed above.
     latest_rev = int(app.get_and_cache_rowdata('latest_rev')['rev_number'])
     if not latest_rev:
       logging.error("MergerData.bootstrap(): Didn't get latest_rev. Aborting.")
       return
     n = latest_rev
     num_rows_saved = num_rows_skipped = 0
     while num_rows_saved < self.SIZE and num_rows_skipped < 10:
-      logging.info('MergerData.bootstrap(): Getting revision %s' % n)
       curr_row = RowData()
-      for m in app.DEFAULT_MASTERS_TO_MERGE:
-        # Fetch the relevant data from the datastore / cache.
-        row_data = app.get_and_cache_rowdata('%s/console/%s' % (m, n))
-        if not row_data:
-          continue
-        # Only grab the common data from the main master.
-        if m == 'chromium.main':
-          curr_row.revision = int(row_data['rev_number'])
-          curr_row.committer = row_data['name']
-          curr_row.comment = row_data['comment']
-          curr_row.details = row_data['details']
-        curr_row.status[m] = row_data['status']
+      for m in self.ordered_masters:
+        update_row(n, m, curr_row)
       # If we didn't get any data, that revision doesn't exist, so skip on.
       if not curr_row.revision:
-        logging.info('MergerData.bootstrap(): No data for revision %s' % n)
         num_rows_skipped += 1
         n -= 1
         continue
-      logging.info('MergerData.bootstrap(): Got data for revision %s' % n)
       self.rows[n] = curr_row
       num_rows_skipped = 0
       num_rows_saved += 1
       n -= 1
     self.latest_rev = max(self.rows.keys())


+def update_row(revision, master, row):
+  """Fetches a row from the datastore and puts it in a RowData object."""
+  # Fetch the relevant data from the datastore / cache.
+  row_data = app.get_and_cache_rowdata('%s/console/%s' % (master, revision))
+  if not row_data:
+    return
+  # Only grab the common data from the main master.
+  if master == 'chromium.main':
+    row.revision = int(row_data['rev_number'])
+    row.revlink = row_data['rev']
+    row.committer = row_data['name']
+    row.comment = row_data['comment']
+    row.details = row_data['details']
+  row.status.setdefault(master, {})
+  update_status(master, row_data['status'], row.status)
+
+
+def update_status(master, status_html, status_dict):
+  """Parses build status information and saves it to a status dictionary."""
+  builder_soup = BeautifulSoup(status_html)
+  builders_by_category = builder_soup.findAll('table')
+  for i, c in enumerate(data.ordered_categories[master]):
+    status_dict[master].setdefault(c, {})
+    statuses_by_builder = builders_by_category[i].findAll('td',
                                                           'DevStatusBox')
+    # If we didn't get anything, it's because we're parsing the overall
+    # summary, so look for Slave boxes instead of Status boxes.
+    if not statuses_by_builder:
+      statuses_by_builder = builders_by_category[i].findAll('td',
                                                             'DevSlaveBox')
+    for j, b in enumerate(data.ordered_builders[master][c]):
+      # Save the whole link as the status to keep ETA and build number info.
+      status = unicode(statuses_by_builder[j].a)
+      status_dict[master][c][b] = status
+
+
+def notstarted(status):
+  """Converts a DevSlave status box to a notstarted DevStatus box."""
+  status_soup = BeautifulSoup(status)
+  status_soup['class'] = 'DevStatusBox notstarted'
+  return unicode(status_soup)
+
+
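update_status() above walks the per-category tables of a cached console page and stores the whole <a> tag for each builder, so build-number and ETA text survive. A minimal standalone sketch of that BeautifulSoup 3 parsing on a hand-written fragment; the markup below only approximates the real console summary, and the plain BeautifulSoup import stands in for the vendored third_party copy used by the patch:

from BeautifulSoup import BeautifulSoup  # assumes a standalone BeautifulSoup 3 install

summary_html = """
<table><tr>
  <td class="DevStatusBox"><a class="DevStatusBox success" title="builder-a" href="#">4241</a></td>
  <td class="DevStatusBox"><a class="DevStatusBox failure" title="builder-b" href="#">4240</a></td>
</tr></table>
"""
soup = BeautifulSoup(summary_html)
for table in soup.findAll('table'):
  for box in table.findAll('td', 'DevStatusBox'):
    # Keep the whole link, as update_status() does.
    print unicode(box.a)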
@@ -100,76 +186,68 @@
 class MergerUpdateAction(base_page.BasePage):
   """Handles update requests.

   Takes data gathered by the cronjob and pulls it into active memory.
   """

   def get(self):
-    logging.info("***BACKEND MERGER UPDATE***")
-    logging.info('BEGIN Stored rows are: %s' % sorted(data.rows))
     latest_rev = int(app.get_and_cache_rowdata('latest_rev')['rev_number'])
-    logging.info('Merger.update(): latest_rev = %s' % latest_rev)
     # We may have brand new rows, so store them.
     if latest_rev not in data.rows:
-      logging.info('Merger.update(): Handling new rows.')
       for n in xrange(data.latest_rev + 1, latest_rev + 1):
-        logging.info('Merger.update(): Getting revision %s' % n)
         curr_row = RowData()
-        for m in app.DEFAULT_MASTERS_TO_MERGE:
-          # Fetch the relevant data from the datastore / cache.
-          row_data = app.get_and_cache_rowdata('%s/console/%s' % (m, n))
-          if not row_data:
-            continue
-          # Only grab the common data from the main master.
-          if m == 'chromium.main':
-            curr_row.revision = int(row_data['rev_number'])
-            curr_row.committer = row_data['name']
-            curr_row.comment = row_data['comment']
-            curr_row.details = row_data['details']
-          curr_row.status[m] = row_data['status']
+        for m in data.ordered_masters:
+          update_row(n, m, curr_row)
         # If we didn't get any data, that revision doesn't exist, so skip on.
         if not curr_row.revision:
-          logging.info('Merger.update(): No data for revision %s' % n)
           continue
-        logging.info('Merger.update(): Got data for revision %s' % n)
         data.rows[n] = curr_row
       # Update our stored latest_rev to reflect the new data.
       data.latest_rev = max(data.rows.keys())
     # Now update the status of the rest of the rows.
     offset = 0
-    logging.info('Merger.update(): Updating rows.')
     while offset < data.SIZE:
       n = data.latest_rev - offset
-      logging.info('Merger.update(): Checking revision %s' % n)
       if n not in data.rows:
-        logging.info('Merger.update(): Don\'t care about revision %s' % n)
         offset += 1
         continue
       curr_row = data.rows[n]
-      for m in app.DEFAULT_MASTERS_TO_MERGE:
+      for m in data.ordered_masters:
         row_data = app.get_and_cache_rowdata('%s/console/%s' % (m, n))
         if not row_data:
           continue
-        curr_row.status[m] = row_data['status']
+        update_status(m, row_data['status'], curr_row.status)
       offset += 1
-      logging.info('Merger.update(): Got new data for revision %s' % n)
     # Finally delete any extra rows that we don't want to keep around.
     if len(data.rows) > data.SIZE:
-      old_revs = sorted(data.rows, reverse=True)[data.SIZE:]
-      logging.info('Merger.update(): Deleting rows %s' % old_revs)
+      old_revs = sorted(data.rows.keys(), reverse=True)[data.SIZE:]
       for rev in old_revs:
         del data.rows[rev]
-    logging.info('FINAL Stored rows are: %s' % sorted(data.rows))
-    self.response.out.write('Update completed.')
+    self.response.out.write('Update completed (rows %s - %s).' %
                             (min(data.rows.keys()), max(data.rows.keys())))


 class MergerRenderAction(base_page.BasePage):

   def get(self):
+    class TemplateData(object):
+      def __init__(self, rhs, numrevs):
+        self.ordered_rows = sorted(rhs.rows.keys(), reverse=True)[:numrevs]
+        self.ordered_masters = rhs.ordered_masters
+        self.ordered_categories = rhs.ordered_categories
+        self.ordered_builders = rhs.ordered_builders
+        self.status = rhs.status
+        self.rows = {}
+        for row in self.ordered_rows:
+          self.rows[row] = rhs.rows[row].purge_unicode()
+        self.category_count = sum([len(self.ordered_categories[master])
                                    for master in self.ordered_masters])
     num_revs = self.request.get('numrevs')
     if num_revs:
       num_revs = utils.clean_int(num_revs, -1)
     if not num_revs or num_revs <= 0:
       num_revs = 25
-    self.response.out.write('Render not yet implemented (%s rows).' % num_revs)
+    out = TemplateData(data, num_revs)
+    template = template_environment.get_template('merger_b.html')
+    self.response.out.write(template.render(data=out))


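MergerRenderAction above snapshots the in-memory MergerData into a TemplateData object and renders templates/merger_b.html, a separate file in this change that is not shown here. A minimal sketch of that render path with an inline template standing in for merger_b.html; the template text and the FakeData fields below are illustrative stand-ins, not the real template:

import jinja2

env = jinja2.Environment(loader=jinja2.DictLoader({
    'mini.html': u'{% for m in data.ordered_masters %}'
                 u'{{ m }}: {{ data.ordered_categories[m]|join(", ") }}\n'
                 u'{% endfor %}',
}))


class FakeData(object):
  # Stand-in for the TemplateData snapshot built in MergerRenderAction.get().
  ordered_masters = ['master-a', 'master-b']
  ordered_categories = {'master-a': ['default'], 'master-b': ['linux', 'mac']}


print env.get_template('mini.html').render(data=FakeData())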
@@ -176,10 +254,14 @@
 # Summon our persistent data model into existence.
 data = MergerData()
 data.bootstrap()
+template_environment = jinja2.Environment()
+template_environment.loader = jinja2.FileSystemLoader('templates')
+template_environment.filters['notstarted'] = notstarted
+

 URLS = [
   ('/restricted/merger/update', MergerUpdateAction),
   ('/restricted/merger/render.*', MergerRenderAction),
 ]

 application = webapp2.WSGIApplication(URLS, debug=True)
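The two handlers are mapped under /restricted/merger/ above: update pulls the cronjob's gathered data into memory, and render accepts an optional numrevs query parameter that falls back to 25. A hypothetical local smoke test using webapp2's get_response() helper; it assumes the App Engine service stubs behind app.get_and_cache_pagedata() / app.get_and_cache_rowdata() are already in place, since importing the module runs data.bootstrap():

import merger  # module import triggers data.bootstrap(), so the stubs must exist

response = merger.application.get_response('/restricted/merger/update')
print response.status_int, response.body

response = merger.application.get_response('/restricted/merger/render?numrevs=40')
print response.status_int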