1 # -*- test-case-name: buildbot.test.test_status -*- | |
2 | |
3 from zope.interface import implements | |
4 from twisted.python import log | |
5 from twisted.persisted import styles | |
6 from twisted.internet import reactor, defer, threads | |
7 from twisted.protocols import basic | |
8 from buildbot.process.properties import Properties | |
9 | |
10 import weakref | |
11 import os, shutil, sys, re, urllib, itertools | |
12 import gc | |
13 from cPickle import load, dump | |
14 from cStringIO import StringIO | |
15 | |
16 try: # bz2 is not available on py23 | |
17 from bz2 import BZ2File | |
18 except ImportError: | |
19 BZ2File = None | |
20 | |
21 try: | |
22 from gzip import GzipFile | |
23 except ImportError: | |
24 GzipFile = None | |
25 | |
26 # sibling imports | |
27 from buildbot import interfaces, util, sourcestamp | |
28 | |
29 SUCCESS, WARNINGS, FAILURE, SKIPPED, EXCEPTION = range(5) | |
30 Results = ["success", "warnings", "failure", "skipped", "exception"] | |
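# Illustrative note (not part of the original module): the Results list maps
# a numeric result constant to its lowercase name, e.g.
#
#     Results[WARNINGS] == "warnings"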
31 | |
32 | |
33 # build processes call the following methods: | |
34 # | |
35 # setDefaults | |
36 # | |
37 # currentlyBuilding | |
38 # currentlyIdle | |
39 # currentlyInterlocked | |
40 # currentlyOffline | |
41 # currentlyWaiting | |
42 # | |
43 # setCurrentActivity | |
44 # updateCurrentActivity | |
45 # addFileToCurrentActivity | |
46 # finishCurrentActivity | |
47 # | |
48 # startBuild | |
49 # finishBuild | |
50 | |
51 STDOUT = interfaces.LOG_CHANNEL_STDOUT | |
52 STDERR = interfaces.LOG_CHANNEL_STDERR | |
53 HEADER = interfaces.LOG_CHANNEL_HEADER | |
54 ChunkTypes = ["stdout", "stderr", "header"] | |
55 | |
56 class LogFileScanner(basic.NetstringReceiver): | |
57 def __init__(self, chunk_cb, channels=[]): | |
58 self.chunk_cb = chunk_cb | |
59 self.channels = channels | |
60 | |
61 def stringReceived(self, line): | |
62 channel = int(line[0]) | |
63 if not self.channels or (channel in self.channels): | |
64 self.chunk_cb((channel, line[1:])) | |
65 | |
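# Illustrative sketch (not part of the original module): LogFileScanner is
# driven the same way the rest of this file drives it, by calling
# dataReceived() with raw netstring data.  Each netstring payload starts
# with a single channel digit, so "6:0build," is a stdout chunk "build".
#
#     chunks = []
#     scanner = LogFileScanner(chunks.append, channels=[STDOUT, STDERR])
#     scanner.dataReceived("6:0build,7:1error\n,")
#     # chunks is now [(0, "build"), (1, "error\n")]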
66 class LogFileProducer: | |
67 """What's the plan? | |
68 | |
69 the LogFile has just one FD, used for both reading and writing. | |
70 Each time you add an entry, fd.seek to the end and then write. | |
71 | |
72 Each reader (i.e. Producer) keeps track of their own offset. The reader | |
73 starts by seeking to the start of the logfile, and reading forwards. | |
74 Between each hunk of file they yield chunks, so they must remember their | |
75 offset before yielding and re-seek back to that offset before reading | |
76 more data. When their read() returns EOF, they're finished with the first | |
77 phase of the reading (everything that's already been written to disk). | |
78 | |
79 After EOF, the remaining data is entirely in the current entries list. | |
80 These entries are all of the same channel, so we can do one "".join and | |
81 obtain a single chunk to be sent to the listener. But since that involves | |
82 a yield, and more data might arrive after we give up control, we have to | |
83 subscribe them before yielding. We can't subscribe them any earlier, | |
84 otherwise they'd get data out of order. | |
85 | |
86 We're using a generator in the first place so that the listener can | |
87 throttle us, which means they're pulling. But the subscription means | |
88 we're pushing. Really we're a Producer. In the first phase we can be | |
89 either a PullProducer or a PushProducer. In the second phase we're only a | |
90 PushProducer. | |
91 | |
92 So the client gives a LogFileConsumer to File.subscribeConsumer . This | |
93 Consumer must have registerProducer(), unregisterProducer(), and | |
94 writeChunk(), and is just like a regular twisted.interfaces.IConsumer, | |
95 except that writeChunk() takes chunks (tuples of (channel,text)) instead | |
96 of the normal write() which takes just text. The LogFileConsumer is | |
97 allowed to call stopProducing, pauseProducing, and resumeProducing on the | |
98 producer instance it is given. """ | |
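    # Illustrative sketch (not part of the original module): a minimal
    # consumer suitable for LogFile.subscribeConsumer().  The class name
    # PrintingConsumer is hypothetical; only the methods described in the
    # docstring above are required.
    #
    #     import sys
    #     class PrintingConsumer:
    #         def registerProducer(self, producer, streaming):
    #             self.producer = producer # may pause/resume/stopProducing it
    #         def unregisterProducer(self):
    #             self.producer = None
    #         def writeChunk(self, chunk):
    #             channel, text = chunk
    #             sys.stdout.write(text)
    #         def finish(self):
    #             sys.stdout.write("-- log finished --\n")
    #
    #     logfile.subscribeConsumer(PrintingConsumer())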
99 | |
100 paused = False | |
101 subscribed = False | |
102 BUFFERSIZE = 2048 | |
103 | |
104 def __init__(self, logfile, consumer): | |
105 self.logfile = logfile | |
106 self.consumer = consumer | |
107 self.chunkGenerator = self.getChunks() | |
108 consumer.registerProducer(self, True) | |
109 | |
110 def getChunks(self): | |
111 f = self.logfile.getFile() | |
112 offset = 0 | |
113 chunks = [] | |
114 p = LogFileScanner(chunks.append) | |
115 f.seek(offset) | |
116 data = f.read(self.BUFFERSIZE) | |
117 offset = f.tell() | |
118 while data: | |
119 p.dataReceived(data) | |
120 while chunks: | |
121 c = chunks.pop(0) | |
122 yield c | |
123 f.seek(offset) | |
124 data = f.read(self.BUFFERSIZE) | |
125 offset = f.tell() | |
126 del f | |
127 | |
128 # now subscribe them to receive new entries | |
129 self.subscribed = True | |
130 self.logfile.watchers.append(self) | |
131 d = self.logfile.waitUntilFinished() | |
132 | |
133 # then give them the not-yet-merged data | |
134 if self.logfile.runEntries: | |
135 channel = self.logfile.runEntries[0][0] | |
136 text = "".join([c[1] for c in self.logfile.runEntries]) | |
137 yield (channel, text) | |
138 | |
139 # now we've caught up to the present. Anything further will come from | |
140 # the logfile subscription. We add the callback *after* yielding the | |
141 # data from runEntries, because the logfile might have finished | |
142 # during the yield. | |
143 d.addCallback(self.logfileFinished) | |
144 | |
145 def stopProducing(self): | |
146 # TODO: should we still call consumer.finish? probably not. | |
147 self.paused = True | |
148 self.consumer = None | |
149 self.done() | |
150 | |
151 def done(self): | |
152 if self.chunkGenerator: | |
153 self.chunkGenerator = None # stop making chunks | |
154 if self.subscribed: | |
155 self.logfile.watchers.remove(self) | |
156 self.subscribed = False | |
157 | |
158 def pauseProducing(self): | |
159 self.paused = True | |
160 | |
161 def resumeProducing(self): | |
162 # Twisted-1.3.0 has a bug which causes hangs when resumeProducing | |
163 # calls transport.write (there is a recursive loop, fixed in 2.0 in | |
164 # t.i.abstract.FileDescriptor.doWrite by setting the producerPaused | |
165 # flag *before* calling resumeProducing). To work around this, we | |
166 # just put off the real resumeProducing for a moment. This probably | |
167 # has a performance hit, but I'm going to assume that the log files | |
168 # are not retrieved frequently enough for it to be an issue. | |
169 | |
170 reactor.callLater(0, self._resumeProducing) | |
171 | |
172 def _resumeProducing(self): | |
173 self.paused = False | |
174 if not self.chunkGenerator: | |
175 return | |
176 try: | |
177 while not self.paused: | |
178 chunk = self.chunkGenerator.next() | |
179 self.consumer.writeChunk(chunk) | |
180 # we exit this when the consumer says to stop, or we run out | |
181 # of chunks | |
182 except StopIteration: | |
183 # if the generator finished, it will have done releaseFile | |
184 self.chunkGenerator = None | |
185 # now everything goes through the subscription, and they don't get to | |
186 # pause anymore | |
187 | |
188 def logChunk(self, build, step, logfile, channel, chunk): | |
189 if self.consumer: | |
190 self.consumer.writeChunk((channel, chunk)) | |
191 | |
192 def logfileFinished(self, logfile): | |
193 self.done() | |
194 if self.consumer: | |
195 self.consumer.unregisterProducer() | |
196 self.consumer.finish() | |
197 self.consumer = None | |
198 | |
199 def _tryremove(filename, timeout, retries): | |
200     """Try to remove a file; if that fails, try again after 'timeout' seconds. | |
201     Each retry multiplies the timeout by 4, and we give up after 'retries' | |
202     further attempts. | |
203 | |
204 """ | |
205 try: | |
206 os.unlink(filename) | |
207 except OSError: | |
208 if retries > 0: | |
209 reactor.callLater(timeout, _tryremove, filename, timeout * 4, | |
210 retries - 1) | |
211 else: | |
212 log.msg("giving up on removing %s after over %d seconds" % | |
213 (filename, timeout)) | |
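# Illustrative note (not part of the original module): a call such as
#     _tryremove("12-log-compile-output", 1, 5)
# attempts the unlink immediately, then retries after roughly 1, 4, 16, 64
# and 256 seconds before logging that it has given up.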
214 | |
215 class LogFile: | |
216 """A LogFile keeps all of its contents on disk, in a non-pickle format to | |
217 which new entries can easily be appended. The file on disk has a name | |
218 like 12-log-compile-output, under the Builder's directory. The actual | |
219 filename is generated (before the LogFile is created) by | |
220 L{BuildStatus.generateLogfileName}. | |
221 | |
222 Old LogFile pickles (which kept their contents in .entries) must be | |
223 upgraded. The L{BuilderStatus} is responsible for doing this, when it | |
224 loads the L{BuildStatus} into memory. The Build pickle is not modified, | |
225 so users who go from 0.6.5 back to 0.6.4 don't have to lose their | |
226 logs.""" | |
227 | |
228 implements(interfaces.IStatusLog, interfaces.ILogFile) | |
229 | |
230 finished = False | |
231 length = 0 | |
232 nonHeaderLength = 0 | |
233 tailLength = 0 | |
234 chunkSize = 10*1000 | |
235 runLength = 0 | |
236 # No max size by default | |
237 logMaxSize = None | |
238 # Don't keep a tail buffer by default | |
239 logMaxTailSize = None | |
240 maxLengthExceeded = False | |
241 runEntries = [] # provided so old pickled builds will getChunks() ok | |
242 entries = None | |
243 BUFFERSIZE = 2048 | |
244 filename = None # relative to the Builder's basedir | |
245 openfile = None | |
246 compressMethod = "bz2" | |
247 | |
248 def __init__(self, parent, name, logfilename): | |
249 """ | |
250 @type parent: L{BuildStepStatus} | |
251 @param parent: the Step that this log is a part of | |
252 @type name: string | |
253 @param name: the name of this log, typically 'output' | |
254 @type logfilename: string | |
255 @param logfilename: the Builder-relative pathname for the saved entries | |
256 """ | |
257 self.step = parent | |
258 self.name = name | |
259 self.filename = logfilename | |
260 fn = self.getFilename() | |
261 if os.path.exists(fn): | |
262 # the buildmaster was probably stopped abruptly, before the | |
263 # BuilderStatus could be saved, so BuilderStatus.nextBuildNumber | |
264 # is out of date, and we're overlapping with earlier builds now. | |
265 # Warn about it, but then overwrite the old pickle file | |
266 log.msg("Warning: Overwriting old serialized Build at %s" % fn) | |
267 dirname = os.path.dirname(fn) | |
268 if not os.path.exists(dirname): | |
269 os.makedirs(dirname) | |
270 self.openfile = open(fn, "w+") | |
271 self.runEntries = [] | |
272 self.watchers = [] | |
273 self.finishedWatchers = [] | |
274 self.tailBuffer = [] | |
275 | |
276 def getFilename(self): | |
277 return os.path.join(self.step.build.builder.basedir, self.filename) | |
278 | |
279 def hasContents(self): | |
280 return os.path.exists(self.getFilename() + '.bz2') or \ | |
281 os.path.exists(self.getFilename() + '.gz') or \ | |
282 os.path.exists(self.getFilename()) | |
283 | |
284 def getName(self): | |
285 return self.name | |
286 | |
287 def getStep(self): | |
288 return self.step | |
289 | |
290 def isFinished(self): | |
291 return self.finished | |
292 def waitUntilFinished(self): | |
293 if self.finished: | |
294 d = defer.succeed(self) | |
295 else: | |
296 d = defer.Deferred() | |
297 self.finishedWatchers.append(d) | |
298 return d | |
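    # Illustrative sketch (not part of the original module): the returned
    # Deferred fires with the LogFile itself, so callers typically chain on
    # it, e.g.
    #
    #     d = logfile.waitUntilFinished()
    #     d.addCallback(lambda log: log.getText())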
299 | |
300 def getFile(self): | |
301 if self.openfile: | |
302 # this is the filehandle we're using to write to the log, so | |
303 # don't close it! | |
304 return self.openfile | |
305 # otherwise they get their own read-only handle | |
306 # try a compressed log first | |
307 if BZ2File is not None: | |
308 try: | |
309 return BZ2File(self.getFilename() + ".bz2", "r") | |
310 except IOError: | |
311 pass | |
312 if GzipFile is not None: | |
313 try: | |
314 return GzipFile(self.getFilename() + ".gz", "r") | |
315 except IOError: | |
316 pass | |
317 return open(self.getFilename(), "r") | |
318 | |
319 def getText(self): | |
320 # this produces one ginormous string | |
321 return "".join(self.getChunks([STDOUT, STDERR], onlyText=True)) | |
322 | |
323 def getTextWithHeaders(self): | |
324 return "".join(self.getChunks(onlyText=True)) | |
325 | |
326 def getChunks(self, channels=[], onlyText=False): | |
327 # generate chunks for everything that was logged at the time we were | |
328 # first called, so remember how long the file was when we started. | |
329 # Don't read beyond that point. The current contents of | |
330 # self.runEntries will follow. | |
331 | |
332 # this returns an iterator, which means arbitrary things could happen | |
333 # while we're yielding. This will faithfully deliver the log as it | |
334 # existed when it was started, and not return anything after that | |
335 # point. To use this in subscribe(catchup=True) without missing any | |
336         # data, you must ensure that nothing will be added to the log during | |
337 # yield() calls. | |
338 | |
339 f = self.getFile() | |
340 if not self.finished: | |
341 offset = 0 | |
342 f.seek(0, 2) | |
343 remaining = f.tell() | |
344 else: | |
345 offset = 0 | |
346 remaining = None | |
347 | |
348 leftover = None | |
349 if self.runEntries and (not channels or | |
350 (self.runEntries[0][0] in channels)): | |
351 leftover = (self.runEntries[0][0], | |
352 "".join([c[1] for c in self.runEntries])) | |
353 | |
354 # freeze the state of the LogFile by passing a lot of parameters into | |
355 # a generator | |
356 return self._generateChunks(f, offset, remaining, leftover, | |
357 channels, onlyText) | |
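    # Illustrative sketch (not part of the original module): iterating over
    # a log one chunk at a time instead of using getText(), which builds one
    # large string.  handle_text() is a hypothetical callback.
    #
    #     for channel, text in logfile.getChunks([STDOUT, STDERR]):
    #         handle_text(channel, text)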
358 | |
359 def _generateChunks(self, f, offset, remaining, leftover, | |
360 channels, onlyText): | |
361 chunks = [] | |
362 p = LogFileScanner(chunks.append, channels) | |
363 f.seek(offset) | |
364 if remaining is not None: | |
365 data = f.read(min(remaining, self.BUFFERSIZE)) | |
366 remaining -= len(data) | |
367 else: | |
368 data = f.read(self.BUFFERSIZE) | |
369 | |
370 offset = f.tell() | |
371 while data: | |
372 p.dataReceived(data) | |
373 while chunks: | |
374 channel, text = chunks.pop(0) | |
375 if onlyText: | |
376 yield text | |
377 else: | |
378 yield (channel, text) | |
379 f.seek(offset) | |
380 if remaining is not None: | |
381 data = f.read(min(remaining, self.BUFFERSIZE)) | |
382 remaining -= len(data) | |
383 else: | |
384 data = f.read(self.BUFFERSIZE) | |
385 offset = f.tell() | |
386 del f | |
387 | |
388 if leftover: | |
389 if onlyText: | |
390 yield leftover[1] | |
391 else: | |
392 yield leftover | |
393 | |
394 def readlines(self, channel=STDOUT): | |
395 """Return an iterator that produces newline-terminated lines, | |
396 excluding header chunks.""" | |
397 # TODO: make this memory-efficient, by turning it into a generator | |
398 # that retrieves chunks as necessary, like a pull-driven version of | |
399 # twisted.protocols.basic.LineReceiver | |
400 alltext = "".join(self.getChunks([channel], onlyText=True)) | |
401 io = StringIO(alltext) | |
402 return io.readlines() | |
403 | |
404 def subscribe(self, receiver, catchup): | |
405 if self.finished: | |
406 return | |
407 self.watchers.append(receiver) | |
408 if catchup: | |
409 for channel, text in self.getChunks(): | |
410 # TODO: add logChunks(), to send over everything at once? | |
411 receiver.logChunk(self.step.build, self.step, self, | |
412 channel, text) | |
413 | |
414 def unsubscribe(self, receiver): | |
415 if receiver in self.watchers: | |
416 self.watchers.remove(receiver) | |
417 | |
418 def subscribeConsumer(self, consumer): | |
419 p = LogFileProducer(self, consumer) | |
420 p.resumeProducing() | |
421 | |
422 # interface used by the build steps to add things to the log | |
423 | |
424 def merge(self): | |
425 # merge all .runEntries (which are all of the same type) into a | |
426 # single chunk for .entries | |
427 if not self.runEntries: | |
428 return | |
429 channel = self.runEntries[0][0] | |
430 text = "".join([c[1] for c in self.runEntries]) | |
431 assert channel < 10 | |
432 f = self.openfile | |
433 f.seek(0, 2) | |
434 offset = 0 | |
435 while offset < len(text): | |
436 size = min(len(text)-offset, self.chunkSize) | |
437 f.write("%d:%d" % (1 + size, channel)) | |
438 f.write(text[offset:offset+size]) | |
439 f.write(",") | |
440 offset += size | |
441 self.runEntries = [] | |
442 self.runLength = 0 | |
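    # Illustrative note (not part of the original module): with
    # self.runEntries == [(STDOUT, "ab"), (STDOUT, "cd")], the loop above
    # appends the single netstring "5:0abcd," to the on-disk log (length 5 =
    # one channel digit plus four bytes of text).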
443 | |
444 def addEntry(self, channel, text): | |
445 assert not self.finished | |
446 | |
447 if isinstance(text, unicode): | |
448 text = text.encode('utf-8') | |
449 if channel != HEADER: | |
450 # Truncate the log if it's more than logMaxSize bytes | |
451 if self.logMaxSize and self.nonHeaderLength > self.logMaxSize: | |
452 # Add a message about what's going on | |
453 if not self.maxLengthExceeded: | |
454                     msg = "\nOutput exceeded %i bytes, remaining output has been truncated\n" % self.logMaxSize | |
455 self.addEntry(HEADER, msg) | |
456 self.merge() | |
457 self.maxLengthExceeded = True | |
458 | |
459 if self.logMaxTailSize: | |
460 # Update the tail buffer | |
461 self.tailBuffer.append((channel, text)) | |
462 self.tailLength += len(text) | |
463 while self.tailLength > self.logMaxTailSize: | |
464 # Drop some stuff off the beginning of the buffer | |
465 c,t = self.tailBuffer.pop(0) | |
466 n = len(t) | |
467 self.tailLength -= n | |
468 assert self.tailLength >= 0 | |
469 return | |
470 | |
471 self.nonHeaderLength += len(text) | |
472 | |
473 # we only add to .runEntries here. merge() is responsible for adding | |
474 # merged chunks to .entries | |
475 if self.runEntries and channel != self.runEntries[0][0]: | |
476 self.merge() | |
477 self.runEntries.append((channel, text)) | |
478 self.runLength += len(text) | |
479 if self.runLength >= self.chunkSize: | |
480 self.merge() | |
481 | |
482 for w in self.watchers: | |
483 w.logChunk(self.step.build, self.step, self, channel, text) | |
484 self.length += len(text) | |
485 | |
486 def addStdout(self, text): | |
487 self.addEntry(STDOUT, text) | |
488 def addStderr(self, text): | |
489 self.addEntry(STDERR, text) | |
490 def addHeader(self, text): | |
491 self.addEntry(HEADER, text) | |
492 | |
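    # Illustrative sketch (not part of the original module): how a build
    # step typically feeds a LogFile obtained from BuildStepStatus.addLog():
    #
    #     log = step_status.addLog("output")
    #     log.addHeader("command started\n")
    #     log.addStdout("compiling...\n")
    #     log.addStderr("warning: foo\n")
    #     log.finish()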
493 def finish(self): | |
494 if self.tailBuffer: | |
495 msg = "\nFinal %i bytes follow below:\n" % self.tailLength | |
496 tmp = self.runEntries | |
497 self.runEntries = [(HEADER, msg)] | |
498 self.merge() | |
499 self.runEntries = self.tailBuffer | |
500 self.merge() | |
501 self.runEntries = tmp | |
502 self.merge() | |
503 self.tailBuffer = [] | |
504 else: | |
505 self.merge() | |
506 | |
507 if self.openfile: | |
508 # we don't do an explicit close, because there might be readers | |
509             # sharing the filehandle. As soon as they stop reading, the | |
510 # filehandle will be released and automatically closed. | |
511 self.openfile.flush() | |
512 del self.openfile | |
513 self.finished = True | |
514 watchers = self.finishedWatchers | |
515 self.finishedWatchers = [] | |
516 for w in watchers: | |
517 w.callback(self) | |
518 self.watchers = [] | |
519 | |
520 | |
521 def compressLog(self): | |
522 # bail out if there's no compression support | |
523 if self.compressMethod == "bz2": | |
524 if BZ2File is None: | |
525 return | |
526 compressed = self.getFilename() + ".bz2.tmp" | |
527 elif self.compressMethod == "gz": | |
528 if GzipFile is None: | |
529 return | |
530 compressed = self.getFilename() + ".gz.tmp" | |
531 d = threads.deferToThread(self._compressLog, compressed) | |
532 d.addCallback(self._renameCompressedLog, compressed) | |
533 d.addErrback(self._cleanupFailedCompress, compressed) | |
534 return d | |
535 | |
536 def _compressLog(self, compressed): | |
537 infile = self.getFile() | |
538 if self.compressMethod == "bz2": | |
539 cf = BZ2File(compressed, 'w') | |
540 elif self.compressMethod == "gz": | |
541 cf = GzipFile(compressed, 'w') | |
542 bufsize = 1024*1024 | |
543 while True: | |
544 buf = infile.read(bufsize) | |
545 cf.write(buf) | |
546 if len(buf) < bufsize: | |
547 break | |
548 cf.close() | |
549 def _renameCompressedLog(self, rv, compressed): | |
550 if self.compressMethod == "bz2": | |
551 filename = self.getFilename() + '.bz2' | |
552 else: | |
553 filename = self.getFilename() + '.gz' | |
554 if sys.platform == 'win32': | |
555 # windows cannot rename a file on top of an existing one, so | |
556 # fall back to delete-first. There are ways this can fail and | |
557 # lose the builder's history, so we avoid using it in the | |
558 # general (non-windows) case | |
559 if os.path.exists(filename): | |
560 os.unlink(filename) | |
561 os.rename(compressed, filename) | |
562 _tryremove(self.getFilename(), 1, 5) | |
563 def _cleanupFailedCompress(self, failure, compressed): | |
564 log.msg("failed to compress %s" % self.getFilename()) | |
565 if os.path.exists(compressed): | |
566 _tryremove(compressed, 1, 5) | |
567 failure.trap() # reraise the failure | |
568 | |
569 # persistence stuff | |
570 def __getstate__(self): | |
571 d = self.__dict__.copy() | |
572 del d['step'] # filled in upon unpickling | |
573 del d['watchers'] | |
574 del d['finishedWatchers'] | |
575 d['entries'] = [] # let 0.6.4 tolerate the saved log. TODO: really? | |
576 if d.has_key('finished'): | |
577 del d['finished'] | |
578 if d.has_key('openfile'): | |
579 del d['openfile'] | |
580 return d | |
581 | |
582 def __setstate__(self, d): | |
583 self.__dict__ = d | |
584 self.watchers = [] # probably not necessary | |
585 self.finishedWatchers = [] # same | |
586 # self.step must be filled in by our parent | |
587 self.finished = True | |
588 | |
589 def upgrade(self, logfilename): | |
590 """Save our .entries to a new-style offline log file (if necessary), | |
591 and modify our in-memory representation to use it. The original | |
592 pickled LogFile (inside the pickled Build) won't be modified.""" | |
593 self.filename = logfilename | |
594 if not os.path.exists(self.getFilename()): | |
595 self.openfile = open(self.getFilename(), "w") | |
596 self.finished = False | |
597 for channel,text in self.entries: | |
598 self.addEntry(channel, text) | |
599 self.finish() # releases self.openfile, which will be closed | |
600 del self.entries | |
601 | |
602 class HTMLLogFile: | |
603 implements(interfaces.IStatusLog) | |
604 | |
605 filename = None | |
606 | |
607 def __init__(self, parent, name, logfilename, html): | |
608 self.step = parent | |
609 self.name = name | |
610 self.filename = logfilename | |
611 self.html = html | |
612 | |
613 def getName(self): | |
614 return self.name # set in BuildStepStatus.addLog | |
615 def getStep(self): | |
616 return self.step | |
617 | |
618 def isFinished(self): | |
619 return True | |
620 def waitUntilFinished(self): | |
621 return defer.succeed(self) | |
622 | |
623 def hasContents(self): | |
624 return True | |
625 def getText(self): | |
626 return self.html # looks kinda like text | |
627 def getTextWithHeaders(self): | |
628 return self.html | |
629 def getChunks(self): | |
630 return [(STDERR, self.html)] | |
631 | |
632 def subscribe(self, receiver, catchup): | |
633 pass | |
634 def unsubscribe(self, receiver): | |
635 pass | |
636 | |
637 def finish(self): | |
638 pass | |
639 | |
640 def __getstate__(self): | |
641 d = self.__dict__.copy() | |
642 del d['step'] | |
643 return d | |
644 | |
645 def upgrade(self, logfilename): | |
646 pass | |
647 | |
648 | |
649 class Event: | |
650 implements(interfaces.IStatusEvent) | |
651 | |
652 started = None | |
653 finished = None | |
654 text = [] | |
655 | |
656 # IStatusEvent methods | |
657 def getTimes(self): | |
658 return (self.started, self.finished) | |
659 def getText(self): | |
660 return self.text | |
661 def getLogs(self): | |
662 return [] | |
663 | |
664 def finish(self): | |
665 self.finished = util.now() | |
666 | |
667 class TestResult: | |
668 implements(interfaces.ITestResult) | |
669 | |
670 def __init__(self, name, results, text, logs): | |
671 assert isinstance(name, tuple) | |
672 self.name = name | |
673 self.results = results | |
674 self.text = text | |
675 self.logs = logs | |
676 | |
677 def getName(self): | |
678 return self.name | |
679 | |
680 def getResults(self): | |
681 return self.results | |
682 | |
683 def getText(self): | |
684 return self.text | |
685 | |
686 def getLogs(self): | |
687 return self.logs | |
688 | |
689 | |
690 class BuildSetStatus: | |
691 implements(interfaces.IBuildSetStatus) | |
692 | |
693 def __init__(self, source, reason, builderNames, bsid=None): | |
694 self.source = source | |
695 self.reason = reason | |
696 self.builderNames = builderNames | |
697 self.id = bsid | |
698 self.successWatchers = [] | |
699 self.finishedWatchers = [] | |
700 self.stillHopeful = True | |
701 self.finished = False | |
702 self.results = None | |
703 | |
704 def setBuildRequestStatuses(self, buildRequestStatuses): | |
705 self.buildRequests = buildRequestStatuses | |
706 def setResults(self, results): | |
707 # the build set succeeds only if all its component builds succeed | |
708 self.results = results | |
709 def giveUpHope(self): | |
710 self.stillHopeful = False | |
711 | |
712 | |
713 def notifySuccessWatchers(self): | |
714 for d in self.successWatchers: | |
715 d.callback(self) | |
716 self.successWatchers = [] | |
717 | |
718 def notifyFinishedWatchers(self): | |
719 self.finished = True | |
720 for d in self.finishedWatchers: | |
721 d.callback(self) | |
722 self.finishedWatchers = [] | |
723 | |
724 # methods for our clients | |
725 | |
726 def getSourceStamp(self): | |
727 return self.source | |
728 def getReason(self): | |
729 return self.reason | |
730 def getResults(self): | |
731 return self.results | |
732 def getID(self): | |
733 return self.id | |
734 | |
735 def getBuilderNames(self): | |
736 return self.builderNames | |
737 def getBuildRequests(self): | |
738 return self.buildRequests | |
739 def isFinished(self): | |
740 return self.finished | |
741 | |
742 def waitUntilSuccess(self): | |
743 if self.finished or not self.stillHopeful: | |
744 # the deferreds have already fired | |
745 return defer.succeed(self) | |
746 d = defer.Deferred() | |
747 self.successWatchers.append(d) | |
748 return d | |
749 | |
750 def waitUntilFinished(self): | |
751 if self.finished: | |
752 return defer.succeed(self) | |
753 d = defer.Deferred() | |
754 self.finishedWatchers.append(d) | |
755 return d | |
756 | |
757 def asDict(self): | |
758 result = {} | |
759 # Constant | |
760 result['source'] = self.getSourceStamp().asDict() | |
761 result['reason'] = self.getReason() | |
762 result['results'] = self.getResults() | |
763 result['builderNames'] = self.getBuilderNames() | |
764 result['isFinished'] = self.isFinished() | |
765 | |
766 # Transient | |
767 result['buildRequests'] = [ | |
768 build.asDict() for build in self.getBuildRequests()] | |
769 return result | |
770 | |
771 | |
772 class BuildRequestStatus: | |
773 implements(interfaces.IBuildRequestStatus) | |
774 | |
775 def __init__(self, source, builderName): | |
776 self.source = source | |
777 self.builderName = builderName | |
778 self.builds = [] # list of BuildStatus objects | |
779 self.observers = [] | |
780 self.submittedAt = None | |
781 | |
782 def buildStarted(self, build): | |
783 self.builds.append(build) | |
784 for o in self.observers[:]: | |
785 o(build) | |
786 | |
787 # methods called by our clients | |
788 def getSourceStamp(self): | |
789 return self.source | |
790 def getBuilderName(self): | |
791 return self.builderName | |
792 def getBuilds(self): | |
793 return self.builds | |
794 | |
795 def subscribe(self, observer): | |
796 self.observers.append(observer) | |
797 for b in self.builds: | |
798 observer(b) | |
799 def unsubscribe(self, observer): | |
800 self.observers.remove(observer) | |
801 | |
802 def getSubmitTime(self): | |
803 return self.submittedAt | |
804 def setSubmitTime(self, t): | |
805 self.submittedAt = t | |
806 | |
807 def asDict(self): | |
808 result = {} | |
809 # Constant | |
810 result['source'] = self.source.asDict() | |
811 result['builderName'] = self.getBuilderName() | |
812 result['submittedAt'] = self.getSubmitTime() | |
813 | |
814 # Transient | |
815 result['builds'] = [build.getNumber() for build in self.getBuilds()] | |
816 return result | |
817 | |
818 | |
819 class BuildStepStatus(styles.Versioned): | |
820 """ | |
821 I represent a collection of output status for a | |
822 L{buildbot.process.step.BuildStep}. | |
823 | |
824 Statistics contain any information gleaned from a step that is | |
825 not in the form of a logfile. As an example, steps that run | |
826 tests might gather statistics about the number of passed, failed, | |
827 or skipped tests. | |
828 | |
829 @type progress: L{buildbot.status.progress.StepProgress} | |
830 @cvar progress: tracks ETA for the step | |
831 @type text: list of strings | |
832 @cvar text: list of short texts that describe the command and its status | |
833 @type text2: list of strings | |
834 @cvar text2: list of short texts added to the overall build description | |
835 @type logs: dict of string -> L{buildbot.status.builder.LogFile} | |
836 @ivar logs: logs of steps | |
837 @type statistics: dict | |
838 @ivar statistics: results from running this step | |
839 """ | |
840 # note that these are created when the Build is set up, before each | |
841 # corresponding BuildStep has started. | |
842 implements(interfaces.IBuildStepStatus, interfaces.IStatusEvent) | |
843 persistenceVersion = 3 | |
844 | |
845 started = None | |
846 finished = None | |
847 progress = None | |
848 text = [] | |
849 results = (None, []) | |
850 text2 = [] | |
851 watchers = [] | |
852 updates = {} | |
853 finishedWatchers = [] | |
854 statistics = {} | |
855 step_number = None | |
856 | |
857 def __init__(self, parent, step_number): | |
858 assert interfaces.IBuildStatus(parent) | |
859 self.build = parent | |
860 self.step_number = step_number | |
861 self.logs = [] | |
862 self.urls = {} | |
863 self.watchers = [] | |
864 self.updates = {} | |
865 self.finishedWatchers = [] | |
866 self.statistics = {} | |
867 | |
868 def getName(self): | |
869 """Returns a short string with the name of this step. This string | |
870 may have spaces in it.""" | |
871 return self.name | |
872 | |
873 def getBuild(self): | |
874 return self.build | |
875 | |
876 def getTimes(self): | |
877 return (self.started, self.finished) | |
878 | |
879 def getExpectations(self): | |
880 """Returns a list of tuples (name, current, target).""" | |
881 if not self.progress: | |
882 return [] | |
883 ret = [] | |
884 metrics = self.progress.progress.keys() | |
885 metrics.sort() | |
886 for m in metrics: | |
887 t = (m, self.progress.progress[m], self.progress.expectations[m]) | |
888 ret.append(t) | |
889 return ret | |
890 | |
891 def getLogs(self): | |
892 return self.logs | |
893 | |
894 def getURLs(self): | |
895 return self.urls.copy() | |
896 | |
897 def isStarted(self): | |
898 return (self.started is not None) | |
899 | |
900 def isFinished(self): | |
901 return (self.finished is not None) | |
902 | |
903 def waitUntilFinished(self): | |
904 if self.finished: | |
905 d = defer.succeed(self) | |
906 else: | |
907 d = defer.Deferred() | |
908 self.finishedWatchers.append(d) | |
909 return d | |
910 | |
911 # while the step is running, the following methods make sense. | |
912 # Afterwards they return None | |
913 | |
914 def getETA(self): | |
915 if self.started is None: | |
916 return None # not started yet | |
917 if self.finished is not None: | |
918 return None # already finished | |
919 if not self.progress: | |
920 return None # no way to predict | |
921 return self.progress.remaining() | |
922 | |
923 # Once you know the step has finished, the following methods are legal. | |
924 # Before this step has finished, they all return None. | |
925 | |
926 def getText(self): | |
927 """Returns a list of strings which describe the step. These are | |
928 intended to be displayed in a narrow column. If more space is | |
929 available, the caller should join them together with spaces before | |
930 presenting them to the user.""" | |
931 return self.text | |
932 | |
933 def getResults(self): | |
934 """Return a tuple describing the results of the step. | |
935 'result' is one of the constants in L{buildbot.status.builder}: | |
936         SUCCESS, WARNINGS, FAILURE, SKIPPED, or EXCEPTION. | |
937 'strings' is an optional list of strings that the step wants to | |
938 append to the overall build's results. These strings are usually | |
939 more terse than the ones returned by getText(): in particular, | |
940 successful Steps do not usually contribute any text to the | |
941 overall build. | |
942 | |
943 @rtype: tuple of int, list of strings | |
944 @returns: (result, strings) | |
945 """ | |
946 return (self.results, self.text2) | |
947 | |
948 def hasStatistic(self, name): | |
949 """Return true if this step has a value for the given statistic. | |
950 """ | |
951 return self.statistics.has_key(name) | |
952 | |
953 def getStatistic(self, name, default=None): | |
954 """Return the given statistic, if present | |
955 """ | |
956 return self.statistics.get(name, default) | |
957 | |
958 # subscription interface | |
959 | |
960 def subscribe(self, receiver, updateInterval=10): | |
961 # will get logStarted, logFinished, stepETAUpdate | |
962 assert receiver not in self.watchers | |
963 self.watchers.append(receiver) | |
964 self.sendETAUpdate(receiver, updateInterval) | |
965 | |
966 def sendETAUpdate(self, receiver, updateInterval): | |
967 self.updates[receiver] = None | |
968 # they might unsubscribe during stepETAUpdate | |
969 receiver.stepETAUpdate(self.build, self, | |
970 self.getETA(), self.getExpectations()) | |
971 if receiver in self.watchers: | |
972 self.updates[receiver] = reactor.callLater(updateInterval, | |
973 self.sendETAUpdate, | |
974 receiver, | |
975 updateInterval) | |
976 | |
977 def unsubscribe(self, receiver): | |
978 if receiver in self.watchers: | |
979 self.watchers.remove(receiver) | |
980 if receiver in self.updates: | |
981 if self.updates[receiver] is not None: | |
982 self.updates[receiver].cancel() | |
983 del self.updates[receiver] | |
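    # Illustrative sketch (not part of the original module): a minimal
    # watcher for subscribe().  Only the callbacks this module actually
    # invokes are shown; the class name StepWatcher is hypothetical.
    #
    #     class StepWatcher:
    #         def stepETAUpdate(self, build, step, eta, expectations):
    #             print "ETA:", eta
    #         def logStarted(self, build, step, log):
    #             return None          # or return a chunk receiver
    #         def logFinished(self, build, step, log):
    #             pass
    #         def stepTextChanged(self, build, step, text):
    #             pass
    #         def stepText2Changed(self, build, step, text):
    #             pass
    #
    #     step_status.subscribe(StepWatcher(), updateInterval=10)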
984 | |
985 | |
986 # methods to be invoked by the BuildStep | |
987 | |
988 def setName(self, stepname): | |
989 self.name = stepname | |
990 | |
991 def setColor(self, color): | |
992         log.msg("BuildStepStatus.setColor is no longer supported -- ignoring color %s" % (color,)) | |
993 | |
994 def setProgress(self, stepprogress): | |
995 self.progress = stepprogress | |
996 | |
997 def stepStarted(self): | |
998 self.started = util.now() | |
999 if self.build: | |
1000 self.build.stepStarted(self) | |
1001 | |
1002 def addLog(self, name): | |
1003 assert self.started # addLog before stepStarted won't notify watchers | |
1004 logfilename = self.build.generateLogfileName(self.name, name) | |
1005 log = LogFile(self, name, logfilename) | |
1006 log.logMaxSize = self.build.builder.logMaxSize | |
1007 log.logMaxTailSize = self.build.builder.logMaxTailSize | |
1008 log.compressMethod = self.build.builder.logCompressionMethod | |
1009 self.logs.append(log) | |
1010 for w in self.watchers: | |
1011 receiver = w.logStarted(self.build, self, log) | |
1012 if receiver: | |
1013 log.subscribe(receiver, True) | |
1014 d = log.waitUntilFinished() | |
1015 d.addCallback(lambda log: log.unsubscribe(receiver)) | |
1016 d = log.waitUntilFinished() | |
1017 d.addCallback(self.logFinished) | |
1018 return log | |
1019 | |
1020 def addHTMLLog(self, name, html): | |
1021 assert self.started # addLog before stepStarted won't notify watchers | |
1022 logfilename = self.build.generateLogfileName(self.name, name) | |
1023 log = HTMLLogFile(self, name, logfilename, html) | |
1024 self.logs.append(log) | |
1025 for w in self.watchers: | |
1026 receiver = w.logStarted(self.build, self, log) | |
1027 # TODO: think about this: there isn't much point in letting | |
1028 # them subscribe | |
1029 #if receiver: | |
1030 # log.subscribe(receiver, True) | |
1031 w.logFinished(self.build, self, log) | |
1032 | |
1033 def logFinished(self, log): | |
1034 for w in self.watchers: | |
1035 w.logFinished(self.build, self, log) | |
1036 | |
1037 def addURL(self, name, url): | |
1038 self.urls[name] = url | |
1039 | |
1040 def setText(self, text): | |
1041 self.text = text | |
1042 for w in self.watchers: | |
1043 w.stepTextChanged(self.build, self, text) | |
1044 def setText2(self, text): | |
1045 self.text2 = text | |
1046 for w in self.watchers: | |
1047 w.stepText2Changed(self.build, self, text) | |
1048 | |
1049 def setStatistic(self, name, value): | |
1050 """Set the given statistic. Usually called by subclasses. | |
1051 """ | |
1052 self.statistics[name] = value | |
1053 | |
1054 def stepFinished(self, results): | |
1055 self.finished = util.now() | |
1056 self.results = results | |
1057 cld = [] # deferreds for log compression | |
1058 logCompressionLimit = self.build.builder.logCompressionLimit | |
1059 for loog in self.logs: | |
1060 if not loog.isFinished(): | |
1061 loog.finish() | |
1062 # if log compression is on, and it's a real LogFile, | |
1063 # HTMLLogFiles aren't files | |
1064 if logCompressionLimit is not False and \ | |
1065 isinstance(loog, LogFile): | |
1066 if os.path.getsize(loog.getFilename()) > logCompressionLimit: | |
1067 loog_deferred = loog.compressLog() | |
1068 if loog_deferred: | |
1069 cld.append(loog_deferred) | |
1070 | |
1071 for r in self.updates.keys(): | |
1072 if self.updates[r] is not None: | |
1073 self.updates[r].cancel() | |
1074 del self.updates[r] | |
1075 | |
1076 watchers = self.finishedWatchers | |
1077 self.finishedWatchers = [] | |
1078 for w in watchers: | |
1079 w.callback(self) | |
1080 if cld: | |
1081 return defer.DeferredList(cld) | |
1082 | |
1083 def checkLogfiles(self): | |
1084 # filter out logs that have been deleted | |
1085 self.logs = [ l for l in self.logs if l.hasContents() ] | |
1086 | |
1087 # persistence | |
1088 | |
1089 def __getstate__(self): | |
1090 d = styles.Versioned.__getstate__(self) | |
1091 del d['build'] # filled in when loading | |
1092 if d.has_key('progress'): | |
1093 del d['progress'] | |
1094 del d['watchers'] | |
1095 del d['finishedWatchers'] | |
1096 del d['updates'] | |
1097 return d | |
1098 | |
1099 def __setstate__(self, d): | |
1100 styles.Versioned.__setstate__(self, d) | |
1101 # self.build must be filled in by our parent | |
1102 | |
1103 # point the logs to this object | |
1104 for loog in self.logs: | |
1105 loog.step = self | |
1106 | |
1107 def upgradeToVersion1(self): | |
1108 if not hasattr(self, "urls"): | |
1109 self.urls = {} | |
1110 | |
1111 def upgradeToVersion2(self): | |
1112 if not hasattr(self, "statistics"): | |
1113 self.statistics = {} | |
1114 | |
1115 def upgradeToVersion3(self): | |
1116 if not hasattr(self, "step_number"): | |
1117 self.step_number = 0 | |
1118 | |
1119 def asDict(self): | |
1120 result = {} | |
1121 # Constant | |
1122 result['name'] = self.getName() | |
1123 | |
1124 # Transient | |
1125 result['text'] = self.getText() | |
1126 result['results'] = self.getResults() | |
1127 result['isStarted'] = self.isStarted() | |
1128 result['isFinished'] = self.isFinished() | |
1129 result['statistics'] = self.statistics | |
1130 result['times'] = self.getTimes() | |
1131 result['expectations'] = self.getExpectations() | |
1132 result['eta'] = self.getETA() | |
1133 result['urls'] = self.getURLs() | |
1134 result['step_number'] = self.step_number | |
1135 result['logs'] = [[l.getName(), | |
1136 self.build.builder.status.getURLForThing(l)] | |
1137 for l in self.getLogs()] | |
1138 return result | |
1139 | |
1140 | |
1141 class BuildStatus(styles.Versioned): | |
1142 implements(interfaces.IBuildStatus, interfaces.IStatusEvent) | |
1143 persistenceVersion = 3 | |
1144 | |
1145 source = None | |
1146 reason = None | |
1147 changes = [] | |
1148 blamelist = [] | |
1149 requests = [] | |
1150 progress = None | |
1151 started = None | |
1152 finished = None | |
1153 currentStep = None | |
1154 text = [] | |
1155 results = None | |
1156 slavename = "???" | |
1157 | |
1158 # these lists/dicts are defined here so that unserialized instances have | |
1159 # (empty) values. They are set in __init__ to new objects to make sure | |
1160 # each instance gets its own copy. | |
1161 watchers = [] | |
1162 updates = {} | |
1163 finishedWatchers = [] | |
1164 testResults = {} | |
1165 | |
1166 def __init__(self, parent, number): | |
1167 """ | |
1168 @type parent: L{BuilderStatus} | |
1169 @type number: int | |
1170 """ | |
1171 assert interfaces.IBuilderStatus(parent) | |
1172 self.builder = parent | |
1173 self.number = number | |
1174 self.watchers = [] | |
1175 self.updates = {} | |
1176 self.finishedWatchers = [] | |
1177 self.steps = [] | |
1178 self.testResults = {} | |
1179 self.properties = Properties() | |
1180 self.requests = [] | |
1181 | |
1182 def __repr__(self): | |
1183 return "<%s #%s>" % (self.__class__.__name__, self.number) | |
1184 | |
1185 # IBuildStatus | |
1186 | |
1187 def getBuilder(self): | |
1188 """ | |
1189 @rtype: L{BuilderStatus} | |
1190 """ | |
1191 return self.builder | |
1192 | |
1193 def getProperty(self, propname): | |
1194 return self.properties[propname] | |
1195 | |
1196 def getProperties(self): | |
1197 return self.properties | |
1198 | |
1199 def getNumber(self): | |
1200 return self.number | |
1201 | |
1202 def getPreviousBuild(self): | |
1203 if self.number == 0: | |
1204 return None | |
1205 return self.builder.getBuild(self.number-1) | |
1206 | |
1207 def getSourceStamp(self, absolute=False): | |
1208 if not absolute or not self.properties.has_key('got_revision'): | |
1209 return self.source | |
1210         return self.source.getAbsoluteSourceStamp(self.properties['got_revision']) | |
1211 | |
1212 def getReason(self): | |
1213 return self.reason | |
1214 | |
1215 def getChanges(self): | |
1216 return self.changes | |
1217 | |
1218 def getRequests(self): | |
1219 return self.requests | |
1220 | |
1221 def getResponsibleUsers(self): | |
1222 return self.blamelist | |
1223 | |
1224 def getInterestedUsers(self): | |
1225 # TODO: the Builder should add others: sheriffs, domain-owners | |
1226 return self.blamelist + self.properties.getProperty('owners', []) | |
1227 | |
1228 def getSteps(self): | |
1229 """Return a list of IBuildStepStatus objects. For invariant builds | |
1230 (those which always use the same set of Steps), this should be the | |
1231 complete list, however some of the steps may not have started yet | |
1232 (step.getTimes()[0] will be None). For variant builds, this may not | |
1233 be complete (asking again later may give you more of them).""" | |
1234 return self.steps | |
1235 | |
1236 def getTimes(self): | |
1237 return (self.started, self.finished) | |
1238 | |
1239 _sentinel = [] # used as a sentinel to indicate unspecified initial_value | |
1240 def getSummaryStatistic(self, name, summary_fn, initial_value=_sentinel): | |
1241 """Summarize the named statistic over all steps in which it | |
1242         exists, using summary_fn and initial_value to combine multiple | |
1243 results into a single result. This translates to a call to Python's | |
1244 X{reduce}:: | |
1245 return reduce(summary_fn, step_stats_list, initial_value) | |
1246 """ | |
1247 step_stats_list = [ | |
1248 st.getStatistic(name) | |
1249 for st in self.steps | |
1250 if st.hasStatistic(name) ] | |
1251 if initial_value is self._sentinel: | |
1252 return reduce(summary_fn, step_stats_list) | |
1253 else: | |
1254 return reduce(summary_fn, step_stats_list, initial_value) | |
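    # Illustrative note (not part of the original module): summing a
    # hypothetical per-step statistic named "tests-passed" over the whole
    # build:
    #
    #     total = build_status.getSummaryStatistic("tests-passed",
    #                                               lambda a, b: a + b, 0)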
1255 | |
1256 def isFinished(self): | |
1257 return (self.finished is not None) | |
1258 | |
1259 def waitUntilFinished(self): | |
1260 if self.finished: | |
1261 d = defer.succeed(self) | |
1262 else: | |
1263 d = defer.Deferred() | |
1264 self.finishedWatchers.append(d) | |
1265 return d | |
1266 | |
1267 # while the build is running, the following methods make sense. | |
1268 # Afterwards they return None | |
1269 | |
1270 def getETA(self): | |
1271 if self.finished is not None: | |
1272 return None | |
1273 if not self.progress: | |
1274 return None | |
1275 eta = self.progress.eta() | |
1276 if eta is None: | |
1277 return None | |
1278 return eta - util.now() | |
1279 | |
1280 def getCurrentStep(self): | |
1281 return self.currentStep | |
1282 | |
1283 # Once you know the build has finished, the following methods are legal. | |
1284     # Before this build has finished, they all return None. | |
1285 | |
1286 def getText(self): | |
1287 text = [] | |
1288 text.extend(self.text) | |
1289 for s in self.steps: | |
1290 text.extend(s.text2) | |
1291 return text | |
1292 | |
1293 def getResults(self): | |
1294 return self.results | |
1295 | |
1296 def getSlavename(self): | |
1297 return self.slavename | |
1298 | |
1299 def getTestResults(self): | |
1300 return self.testResults | |
1301 | |
1302 def getLogs(self): | |
1303 # TODO: steps should contribute significant logs instead of this | |
1304 # hack, which returns every log from every step. The logs should get | |
1305 # names like "compile" and "test" instead of "compile.output" | |
1306 logs = [] | |
1307 for s in self.steps: | |
1308 for log in s.getLogs(): | |
1309 logs.append(log) | |
1310 return logs | |
1311 | |
1312 # subscription interface | |
1313 | |
1314 def subscribe(self, receiver, updateInterval=None): | |
1315 # will receive stepStarted and stepFinished messages | |
1316 # and maybe buildETAUpdate | |
1317 self.watchers.append(receiver) | |
1318 if updateInterval is not None: | |
1319 self.sendETAUpdate(receiver, updateInterval) | |
1320 | |
1321 def sendETAUpdate(self, receiver, updateInterval): | |
1322 self.updates[receiver] = None | |
1323 ETA = self.getETA() | |
1324 if ETA is not None: | |
1325 receiver.buildETAUpdate(self, self.getETA()) | |
1326 # they might have unsubscribed during buildETAUpdate | |
1327 if receiver in self.watchers: | |
1328 self.updates[receiver] = reactor.callLater(updateInterval, | |
1329 self.sendETAUpdate, | |
1330 receiver, | |
1331 updateInterval) | |
1332 | |
1333 def unsubscribe(self, receiver): | |
1334 if receiver in self.watchers: | |
1335 self.watchers.remove(receiver) | |
1336 if receiver in self.updates: | |
1337 if self.updates[receiver] is not None: | |
1338 self.updates[receiver].cancel() | |
1339 del self.updates[receiver] | |
1340 | |
1341 # methods for the base.Build to invoke | |
1342 | |
1343 def addStepWithName(self, name): | |
1344 """The Build is setting up, and has added a new BuildStep to its | |
1345 list. Create a BuildStepStatus object to which it can send status | |
1346 updates.""" | |
1347 | |
1348 s = BuildStepStatus(self, len(self.steps)) | |
1349 s.setName(name) | |
1350 self.steps.append(s) | |
1351 return s | |
1352 | |
1353 def setProperty(self, propname, value, source): | |
1354 self.properties.setProperty(propname, value, source) | |
1355 | |
1356 def addTestResult(self, result): | |
1357 self.testResults[result.getName()] = result | |
1358 | |
1359 def setSourceStamp(self, sourceStamp): | |
1360 self.source = sourceStamp | |
1361 self.changes = self.source.changes | |
1362 | |
1363 def setRequests(self, requests): | |
1364 self.requests = requests | |
1365 | |
1366 def setReason(self, reason): | |
1367 self.reason = reason | |
1368 def setBlamelist(self, blamelist): | |
1369 self.blamelist = blamelist | |
1370 def setProgress(self, progress): | |
1371 self.progress = progress | |
1372 | |
1373 def buildStarted(self, build): | |
1374 """The Build has been set up and is about to be started. It can now | |
1375 be safely queried, so it is time to announce the new build.""" | |
1376 | |
1377 self.started = util.now() | |
1378 # now that we're ready to report status, let the BuilderStatus tell | |
1379 # the world about us | |
1380 self.builder.buildStarted(self) | |
1381 | |
1382 def setSlavename(self, slavename): | |
1383 self.slavename = slavename | |
1384 | |
1385 def setText(self, text): | |
1386 assert isinstance(text, (list, tuple)) | |
1387 self.text = text | |
1388 def setResults(self, results): | |
1389 self.results = results | |
1390 | |
1391 def buildFinished(self): | |
1392 self.currentStep = None | |
1393 self.finished = util.now() | |
1394 | |
1395 for r in self.updates.keys(): | |
1396 if self.updates[r] is not None: | |
1397 self.updates[r].cancel() | |
1398 del self.updates[r] | |
1399 | |
1400 watchers = self.finishedWatchers | |
1401 self.finishedWatchers = [] | |
1402 for w in watchers: | |
1403 w.callback(self) | |
1404 | |
1405 # methods called by our BuildStepStatus children | |
1406 | |
1407 def stepStarted(self, step): | |
1408 self.currentStep = step | |
1409 name = self.getBuilder().getName() | |
1410 for w in self.watchers: | |
1411 receiver = w.stepStarted(self, step) | |
1412 if receiver: | |
1413 if type(receiver) == type(()): | |
1414 step.subscribe(receiver[0], receiver[1]) | |
1415 else: | |
1416 step.subscribe(receiver) | |
1417 d = step.waitUntilFinished() | |
1418 d.addCallback(lambda step: step.unsubscribe(receiver)) | |
1419 | |
1420 step.waitUntilFinished().addCallback(self._stepFinished) | |
1421 | |
1422 def _stepFinished(self, step): | |
1423 results = step.getResults() | |
1424 for w in self.watchers: | |
1425 w.stepFinished(self, step, results) | |
1426 | |
1427 # methods called by our BuilderStatus parent | |
1428 | |
1429 def pruneSteps(self): | |
1430 # this build is very old: remove the build steps too | |
1431 self.steps = [] | |
1432 | |
1433 # persistence stuff | |
1434 | |
1435 def generateLogfileName(self, stepname, logname): | |
1436 """Return a filename (relative to the Builder's base directory) where | |
1437 the logfile's contents can be stored uniquely. | |
1438 | |
1439 The base filename is made by combining our build number, the Step's | |
1440 name, and the log's name, then removing unsuitable characters. The | |
1441 filename is then made unique by appending _0, _1, etc, until it does | |
1442 not collide with any other logfile. | |
1443 | |
1444 These files are kept in the Builder's basedir (rather than a | |
1445 per-Build subdirectory) because that makes cleanup easier: cron and | |
1446 find will help get rid of the old logs, but the empty directories are | |
1447 more of a hassle to remove.""" | |
1448 | |
1449 starting_filename = "%d-log-%s-%s" % (self.number, stepname, logname) | |
1450 starting_filename = re.sub(r'[^\w\.\-]', '_', starting_filename) | |
1451 # now make it unique | |
1452 unique_counter = 0 | |
1453 filename = starting_filename | |
1454 while filename in [l.filename | |
1455 for step in self.steps | |
1456 for l in step.getLogs() | |
1457 if l.filename]: | |
1458 filename = "%s_%d" % (starting_filename, unique_counter) | |
1459 unique_counter += 1 | |
1460 return filename | |
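    # Illustrative note (not part of the original module): for build #12, a
    # step named "compile" and a log named "output", the code above yields
    # "12-log-compile-output"; a collision would produce
    # "12-log-compile-output_0", then "_1", and so on.  A step name with a
    # space, e.g. "shell commands", is sanitized to "shell_commands" first.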
1461 | |
1462 def __getstate__(self): | |
1463 d = styles.Versioned.__getstate__(self) | |
1464 # for now, a serialized Build is always "finished". We will never | |
1465 # save unfinished builds. | |
1466 if not self.finished: | |
1467 d['finished'] = True | |
1468 # TODO: push an "interrupted" step so it is clear that the build | |
1469 # was interrupted. The builder will have a 'shutdown' event, but | |
1470 # someone looking at just this build will be confused as to why | |
1471 # the last log is truncated. | |
1472         for k in 'builder', 'watchers', 'updates', 'requests', 'finishedWatchers': | |
1473 if k in d: del d[k] | |
1474 return d | |
1475 | |
1476 def __setstate__(self, d): | |
1477 styles.Versioned.__setstate__(self, d) | |
1478 # self.builder must be filled in by our parent when loading | |
1479 for step in self.steps: | |
1480 step.build = self | |
1481 self.watchers = [] | |
1482 self.updates = {} | |
1483 self.finishedWatchers = [] | |
1484 | |
1485 def upgradeToVersion1(self): | |
1486 if hasattr(self, "sourceStamp"): | |
1487 # the old .sourceStamp attribute wasn't actually very useful | |
1488 maxChangeNumber, patch = self.sourceStamp | |
1489 changes = getattr(self, 'changes', []) | |
1490 source = sourcestamp.SourceStamp(branch=None, | |
1491 revision=None, | |
1492 patch=patch, | |
1493 changes=changes) | |
1494 self.source = source | |
1495 self.changes = source.changes | |
1496 del self.sourceStamp | |
1497 | |
1498 def upgradeToVersion2(self): | |
1499 self.properties = {} | |
1500 | |
1501 def upgradeToVersion3(self): | |
1502 # in version 3, self.properties became a Properties object | |
1503 propdict = self.properties | |
1504 self.properties = Properties() | |
1505 self.properties.update(propdict, "Upgrade from previous version") | |
1506 | |
1507 def upgradeLogfiles(self): | |
1508 # upgrade any LogFiles that need it. This must occur after we've been | |
1509 # attached to our Builder, and after we know about all LogFiles of | |
1510 # all Steps (to get the filenames right). | |
1511 assert self.builder | |
1512 for s in self.steps: | |
1513 for l in s.getLogs(): | |
1514 if l.filename: | |
1515 pass # new-style, log contents are on disk | |
1516 else: | |
1517 logfilename = self.generateLogfileName(s.name, l.name) | |
1518 # let the logfile update its .filename pointer, | |
1519 # transferring its contents onto disk if necessary | |
1520 l.upgrade(logfilename) | |
1521 | |
1522 def checkLogfiles(self): | |
1523 # check that all logfiles exist, and remove references to any that | |
1524 # have been deleted (e.g., by purge()) | |
1525 for s in self.steps: | |
1526 s.checkLogfiles() | |
1527 | |
1528 def saveYourself(self): | |
1529 filename = os.path.join(self.builder.basedir, "%d" % self.number) | |
1530 if os.path.isdir(filename): | |
1531 # leftover from 0.5.0, which stored builds in directories | |
1532 shutil.rmtree(filename, ignore_errors=True) | |
1533 tmpfilename = filename + ".tmp" | |
1534 try: | |
1535 dump(self, open(tmpfilename, "wb"), -1) | |
1536 if sys.platform == 'win32': | |
1537 # windows cannot rename a file on top of an existing one, so | |
1538 # fall back to delete-first. There are ways this can fail and | |
1539 # lose the builder's history, so we avoid using it in the | |
1540 # general (non-windows) case | |
1541 if os.path.exists(filename): | |
1542 os.unlink(filename) | |
1543 os.rename(tmpfilename, filename) | |
1544 except: | |
1545 log.msg("unable to save build %s-#%d" % (self.builder.name, | |
1546 self.number)) | |
1547 log.err() | |
1548 | |
1549 def asDict(self): | |
1550 result = {} | |
1551 # Constant | |
1552 result['builderName'] = self.builder.name | |
1553 result['number'] = self.getNumber() | |
1554 result['sourceStamp'] = self.getSourceStamp().asDict() | |
1555 result['reason'] = self.getReason() | |
1556 result['requests'] = [r.asDict() for r in self.getRequests()] | |
1557 result['blame'] = self.getResponsibleUsers() | |
1558 result['changes'] = [c.asText() for c in self.getChanges()] | |
1559 | |
1560 # Transient | |
1561 result['properties'] = self.getProperties().asList() | |
1562 result['times'] = self.getTimes() | |
1563 result['text'] = self.getText() | |
1564 result['results'] = self.getResults() | |
1565 result['slave'] = self.getSlavename() | |
1566 # TODO(maruel): Add. | |
1567 #result['test_results'] = self.getTestResults() | |
1568 result['logs'] = [[l.getName(), | |
1569 self.builder.status.getURLForThing(l)] for l in self.getLogs()] | |
1570 result['eta'] = self.getETA() | |
1571 result['steps'] = [bss.asDict() for bss in self.steps] | |
1572 if self.getCurrentStep(): | |
1573 result['currentStep'] = self.getCurrentStep().asDict() | |
1574 else: | |
1575 result['currentStep'] = None | |
1576 return result | |
1577 | |
1578 | |
1579 | |
1580 class BuilderStatus(styles.Versioned): | |
1581 """I handle status information for a single process.base.Builder object. | |
1582 That object sends status changes to me (frequently as Events), and I | |
1583 provide them on demand to the various status recipients, like the HTML | |
1584 waterfall display and the live status clients. It also sends build | |
1585 summaries to me, which I log and provide to status clients who aren't | |
1586 interested in seeing details of the individual build steps. | |
1587 | |
1588 I am responsible for maintaining the list of historic Events and Builds, | |
1589 pruning old ones, and loading them from / saving them to disk. | |
1590 | |
1591 I live in the buildbot.process.base.Builder object, in the | |
1592 .builder_status attribute. | |
1593 | |
1594 @type category: string | |
1595 @ivar category: user-defined category this builder belongs to; can be | |
1596 used to filter on in status clients | |
1597 """ | |
1598 | |
1599 implements(interfaces.IBuilderStatus, interfaces.IEventSource) | |
1600 persistenceVersion = 1 | |
1601 | |
1602 # these limit the amount of memory we consume, as well as the size of the | |
1603 # main Builder pickle. The Build and LogFile pickles on disk must be | |
1604 # handled separately. | |
1605 buildCacheSize = 15 | |
1606 eventHorizon = 50 # forget events beyond this | |
1607 | |
1608 # these limit on-disk storage | |
1609 logHorizon = 40 # forget logs in steps in builds beyond this | |
1610 buildHorizon = 100 # forget builds beyond this | |
1611 | |
1612 category = None | |
1613 currentBigState = "offline" # or idle/waiting/interlocked/building | |
1614 basedir = None # filled in by our parent | |
1615 | |
1616 def __init__(self, buildername, category=None): | |
1617 self.name = buildername | |
1618 self.category = category | |
1619 | |
1620 self.slavenames = [] | |
1621 self.events = [] | |
1622 # these three hold Events, and are used to retrieve the current | |
1623 # state of the boxes. | |
1624 self.lastBuildStatus = None | |
1625 #self.currentBig = None | |
1626 #self.currentSmall = None | |
1627 self.currentBuilds = [] | |
1628 self.pendingBuilds = [] | |
1629 self.nextBuild = None | |
1630 self.watchers = [] | |
1631 self.buildCache = weakref.WeakValueDictionary() | |
1632 self.buildCache_LRU = [] | |
1633 self.logCompressionLimit = False # default to no compression for tests | |
1634 self.logCompressionMethod = "bz2" | |
1635 self.logMaxSize = None # No default limit | |
1636 self.logMaxTailSize = None # No tail buffering | |
1637 | |
1638 # persistence | |
1639 | |
1640 def __getstate__(self): | |
1641 # when saving, don't record transient stuff like what builds are | |
1642 # currently running, because they won't be there when we start back | |
1643 # up. Nor do we save self.watchers, nor anything that gets set by our | |
1644 # parent like .basedir and .status | |
1645 d = styles.Versioned.__getstate__(self) | |
1646 d['watchers'] = [] | |
1647 del d['buildCache'] | |
1648 del d['buildCache_LRU'] | |
1649 for b in self.currentBuilds: | |
1650 b.saveYourself() | |
1651 # TODO: push a 'hey, build was interrupted' event | |
1652 del d['currentBuilds'] | |
1653 del d['pendingBuilds'] | |
1654 del d['currentBigState'] | |
1655 del d['basedir'] | |
1656 del d['status'] | |
1657 del d['nextBuildNumber'] | |
1658 return d | |
1659 | |
1660 def __setstate__(self, d): | |
1661 # when loading, re-initialize the transient stuff. Remember that | |
1662 # upgradeToVersion1 and such will be called after this finishes. | |
1663 styles.Versioned.__setstate__(self, d) | |
1664 self.buildCache = weakref.WeakValueDictionary() | |
1665 self.buildCache_LRU = [] | |
1666 self.currentBuilds = [] | |
1667 self.pendingBuilds = [] | |
1668 self.watchers = [] | |
1669 self.slavenames = [] | |
1670 # self.basedir must be filled in by our parent | |
1671 # self.status must be filled in by our parent | |
1672 | |
1673 def reconfigFromBuildmaster(self, buildmaster): | |
1674 # Note that we do not hang onto the buildmaster, since this object | |
1675 # gets pickled and unpickled. | |
1676 if buildmaster.buildCacheSize: | |
1677 self.buildCacheSize = buildmaster.buildCacheSize | |
1678 if buildmaster.eventHorizon: | |
1679 self.eventHorizon = buildmaster.eventHorizon | |
1680 if buildmaster.logHorizon: | |
1681 self.logHorizon = buildmaster.logHorizon | |
1682 if buildmaster.buildHorizon: | |
1683 self.buildHorizon = buildmaster.buildHorizon | |
1684 | |
1685 def upgradeToVersion1(self): | |
1686 if hasattr(self, 'slavename'): | |
1687 self.slavenames = [self.slavename] | |
1688 del self.slavename | |
1689 if hasattr(self, 'nextBuildNumber'): | |
1690 del self.nextBuildNumber # determineNextBuildNumber chooses this | |
1691 | |
1692 def determineNextBuildNumber(self): | |
1693 """Scan our directory of saved BuildStatus instances to determine | |
1694 what our self.nextBuildNumber should be. Set it one larger than the | |
1695 highest-numbered build we discover. This is called by the top-level | |
1696 Status object shortly after we are created or loaded from disk. | |
1697 """ | |
1698 existing_builds = [int(f) | |
1699 for f in os.listdir(self.basedir) | |
1700                            if re.match(r"^\d+$", f)] | |
1701 if existing_builds: | |
1702 self.nextBuildNumber = max(existing_builds) + 1 | |
1703 else: | |
1704 self.nextBuildNumber = 0 | |
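
    # Illustrative sketch only (hypothetical directory contents): if
    # self.basedir held the entries "3", "7" and "7-log-compile-output",
    # only "3" and "7" match the ^\d+$ pattern above, so this method would
    # set self.nextBuildNumber to 8.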
1705 | |
1706 def setLogCompressionLimit(self, lowerLimit): | |
1707 self.logCompressionLimit = lowerLimit | |
1708 | |
1709 def setLogCompressionMethod(self, method): | |
1710 assert method in ("bz2", "gz") | |
1711 self.logCompressionMethod = method | |
1712 | |
1713 def setLogMaxSize(self, upperLimit): | |
1714 self.logMaxSize = upperLimit | |
1715 | |
1716 def setLogMaxTailSize(self, tailSize): | |
1717 self.logMaxTailSize = tailSize | |
1718 | |
1719 def saveYourself(self): | |
1720 for b in self.currentBuilds: | |
1721             if not b.isFinished(): | |
1722 # interrupted build, need to save it anyway. | |
1723 # BuildStatus.saveYourself will mark it as interrupted. | |
1724 b.saveYourself() | |
1725 filename = os.path.join(self.basedir, "builder") | |
1726 tmpfilename = filename + ".tmp" | |
1727 try: | |
1728 dump(self, open(tmpfilename, "wb"), -1) | |
1729 if sys.platform == 'win32': | |
1730 # windows cannot rename a file on top of an existing one | |
1731 if os.path.exists(filename): | |
1732 os.unlink(filename) | |
1733 os.rename(tmpfilename, filename) | |
1734 except: | |
1735 log.msg("unable to save builder %s" % self.name) | |
1736 log.err() | |
1737 | |
1738 | |
1739 # build cache management | |
1740 | |
1741 def makeBuildFilename(self, number): | |
1742 return os.path.join(self.basedir, "%d" % number) | |
1743 | |
1744 def touchBuildCache(self, build): | |
1745 self.buildCache[build.number] = build | |
1746 if build in self.buildCache_LRU: | |
1747 self.buildCache_LRU.remove(build) | |
1748         self.buildCache_LRU = self.buildCache_LRU[-(self.buildCacheSize-1):] + [ build ] | |
1749 return build | |
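
    # Rough sketch of the cache behaviour (hypothetical build numbers):
    # buildCache is a WeakValueDictionary, so an entry only survives while
    # something holds a strong reference to the BuildStatus; buildCache_LRU
    # holds strong references to the last buildCacheSize builds touched.
    #
    #     b = builder_status.getBuildByNumber(12)  # loads pickle, touches cache
    #     b = builder_status.getBuildByNumber(12)  # now served from buildCache
    #     # once 12 is pushed off buildCache_LRU and nothing else references
    #     # it, the weak entry vanishes and the next lookup re-reads the pickle.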
1750 | |
1751 def getBuildByNumber(self, number): | |
1752 # first look in currentBuilds | |
1753 for b in self.currentBuilds: | |
1754 if b.number == number: | |
1755 return self.touchBuildCache(b) | |
1756 | |
1757 # then in the buildCache | |
1758 if number in self.buildCache: | |
1759 return self.touchBuildCache(self.buildCache[number]) | |
1760 | |
1761 # then fall back to loading it from disk | |
1762 filename = self.makeBuildFilename(number) | |
1763 try: | |
1764 log.msg("Loading builder %s's build %d from on-disk pickle" | |
1765 % (self.name, number)) | |
1766 build = load(open(filename, "rb")) | |
1767 styles.doUpgrade() | |
1768 build.builder = self | |
1769 # handle LogFiles from after 0.5.0 and before 0.6.5 | |
1770 build.upgradeLogfiles() | |
1771 # check that logfiles exist | |
1772 build.checkLogfiles() | |
1773 return self.touchBuildCache(build) | |
1774 except IOError: | |
1775 raise IndexError("no such build %d" % number) | |
1776 except EOFError: | |
1777 raise IndexError("corrupted build pickle %d" % number) | |
1778 | |
1779 def prune(self): | |
1780 gc.collect() | |
1781 | |
1782 # begin by pruning our own events | |
1783 self.events = self.events[-self.eventHorizon:] | |
1784 | |
1785 # get the horizons straight | |
1786 if self.buildHorizon: | |
1787 earliest_build = self.nextBuildNumber - self.buildHorizon | |
1788 else: | |
1789 earliest_build = 0 | |
1790 | |
1791 if self.logHorizon: | |
1792 earliest_log = self.nextBuildNumber - self.logHorizon | |
1793 else: | |
1794 earliest_log = 0 | |
1795 | |
1796 if earliest_log < earliest_build: | |
1797 earliest_log = earliest_build | |
1798 | |
1799 if earliest_build == 0: | |
1800 return | |
1801 | |
1802 # skim the directory and delete anything that shouldn't be there anymore | |
1803 build_re = re.compile(r"^([0-9]+)$") | |
1804 build_log_re = re.compile(r"^([0-9]+)-.*$") | |
1805 # if the directory doesn't exist, bail out here | |
1806 if not os.path.exists(self.basedir): | |
1807 return | |
1808 | |
1809 for filename in os.listdir(self.basedir): | |
1810 num = None | |
1811 mo = build_re.match(filename) | |
1812 is_logfile = False | |
1813 if mo: | |
1814 num = int(mo.group(1)) | |
1815 else: | |
1816 mo = build_log_re.match(filename) | |
1817 if mo: | |
1818 num = int(mo.group(1)) | |
1819 is_logfile = True | |
1820 | |
1821 if num is None: continue | |
1822 if num in self.buildCache: continue | |
1823 | |
1824 if (is_logfile and num < earliest_log) or num < earliest_build: | |
1825 pathname = os.path.join(self.basedir, filename) | |
1826 log.msg("pruning '%s'" % pathname) | |
1827 try: os.unlink(pathname) | |
1828 except OSError: pass | |
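
    # Worked example of the horizon arithmetic in prune() (hypothetical
    # numbers): with nextBuildNumber=150, buildHorizon=100 and logHorizon=40,
    # earliest_build is 50 and earliest_log is 110, so build pickles numbered
    # below 50 and per-step logfiles ("NNN-*") numbered below 110 are deleted,
    # except for builds still present in buildCache.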
1829 | |
1830 # IBuilderStatus methods | |
1831 def getName(self): | |
1832 return self.name | |
1833 | |
1834 def getState(self): | |
1835 return (self.currentBigState, self.currentBuilds) | |
1836 | |
1837 def getSlaves(self): | |
1838 return [self.status.getSlave(name) for name in self.slavenames] | |
1839 | |
1840 def getPendingBuilds(self): | |
1841 return self.pendingBuilds | |
1842 | |
1843 def getCurrentBuilds(self): | |
1844 return self.currentBuilds | |
1845 | |
1846 def getLastFinishedBuild(self): | |
1847 b = self.getBuild(-1) | |
1848 if not (b and b.isFinished()): | |
1849 b = self.getBuild(-2) | |
1850 return b | |
1851 | |
1852 def getCategory(self): | |
1853 return self.category | |
1854 | |
1855 def getBuild(self, number): | |
1856 if number < 0: | |
1857 number = self.nextBuildNumber + number | |
1858 if number < 0 or number >= self.nextBuildNumber: | |
1859 return None | |
1860 | |
1861 try: | |
1862 return self.getBuildByNumber(number) | |
1863 except IndexError: | |
1864 return None | |
1865 | |
1866 def getEvent(self, number): | |
1867 try: | |
1868 return self.events[number] | |
1869 except IndexError: | |
1870 return None | |
1871 | |
1872 def generateFinishedBuilds(self, branches=[], | |
1873 num_builds=None, | |
1874 max_buildnum=None, | |
1875 finished_before=None, | |
1876 max_search=200): | |
1877 got = 0 | |
1878 for Nb in itertools.count(1): | |
1879 if Nb > self.nextBuildNumber: | |
1880 break | |
1881 if Nb > max_search: | |
1882 break | |
1883 build = self.getBuild(-Nb) | |
1884 if build is None: | |
1885 continue | |
1886 if max_buildnum is not None: | |
1887 if build.getNumber() > max_buildnum: | |
1888 continue | |
1889 if not build.isFinished(): | |
1890 continue | |
1891 if finished_before is not None: | |
1892 start, end = build.getTimes() | |
1893 if end >= finished_before: | |
1894 continue | |
1895 if branches: | |
1896 if build.getSourceStamp().branch not in branches: | |
1897 continue | |
1898 got += 1 | |
1899 yield build | |
1900 if num_builds is not None: | |
1901 if got >= num_builds: | |
1902 return | |
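
    # Minimal usage sketch (hypothetical status object and branch name):
    #
    #     for b in builder_status.generateFinishedBuilds(branches=["trunk"],
    #                                                    num_builds=5):
    #         print b.getNumber(), Results[b.getResults()]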
1903 | |
1904     def eventGenerator(self, branches=[], categories=[], committers=[], minTime=0): | |
1905 """This function creates a generator which will provide all of this | |
1906 Builder's status events, starting with the most recent and | |
1907 progressing backwards in time. """ | |
1908 | |
1909         # remember the newest-to-oldest flow here. "next" means earlier. | |
1910 | |
1911 # TODO: interleave build steps and self.events by timestamp. | |
1912 # TODO: um, I think we're already doing that. | |
1913 | |
1914 # TODO: there's probably something clever we could do here to | |
1915 # interleave two event streams (one from self.getBuild and the other | |
1916 # from self.getEvent), which would be simpler than this control flow | |
1917 | |
1918 eventIndex = -1 | |
1919 e = self.getEvent(eventIndex) | |
1920 for Nb in range(1, self.nextBuildNumber+1): | |
1921 b = self.getBuild(-Nb) | |
1922 if not b: | |
1923 # HACK: If this is the first build we are looking at, it is | |
1924 # possible it's in progress but locked before it has written a | |
1925 # pickle; in this case keep looking. | |
1926 if Nb == 1: | |
1927 continue | |
1928 break | |
1929 if b.getTimes()[0] < minTime: | |
1930 break | |
1931 if branches and not b.getSourceStamp().branch in branches: | |
1932 continue | |
1933 if categories and not b.getBuilder().getCategory() in categories: | |
1934 continue | |
1935             if committers and not [True for c in b.getChanges() if c.who in committers]: | |
1936 continue | |
1937 steps = b.getSteps() | |
1938 for Ns in range(1, len(steps)+1): | |
1939 if steps[-Ns].started: | |
1940 step_start = steps[-Ns].getTimes()[0] | |
1941 while e is not None and e.getTimes()[0] > step_start: | |
1942 yield e | |
1943 eventIndex -= 1 | |
1944 e = self.getEvent(eventIndex) | |
1945 yield steps[-Ns] | |
1946 yield b | |
1947 while e is not None: | |
1948 yield e | |
1949 eventIndex -= 1 | |
1950 e = self.getEvent(eventIndex) | |
1951 if e and e.getTimes()[0] < minTime: | |
1952 break | |
1953 | |
1954 def subscribe(self, receiver): | |
1955 # will get builderChangedState, buildStarted, and buildFinished | |
1956 self.watchers.append(receiver) | |
1957 self.publishState(receiver) | |
1958 | |
1959 def unsubscribe(self, receiver): | |
1960 self.watchers.remove(receiver) | |
1961 | |
1962 ## Builder interface (methods called by the Builder which feeds us) | |
1963 | |
1964 def setSlavenames(self, names): | |
1965 self.slavenames = names | |
1966 | |
1967 def addEvent(self, text=[]): | |
1968 # this adds a duration event. When it is done, the user should call | |
1969 # e.finish(). They can also mangle it by modifying .text | |
1970 e = Event() | |
1971 e.started = util.now() | |
1972 e.text = text | |
1973 self.events.append(e) | |
1974 return e # they are free to mangle it further | |
1975 | |
1976 def addPointEvent(self, text=[]): | |
1977 # this adds a point event, one which occurs as a single atomic | |
1978 # instant of time. | |
1979 e = Event() | |
1980 e.started = util.now() | |
1981 e.finished = 0 | |
1982 e.text = text | |
1983 self.events.append(e) | |
1984 return e # for consistency, but they really shouldn't touch it | |
1985 | |
1986 def setBigState(self, state): | |
1987 needToUpdate = state != self.currentBigState | |
1988 self.currentBigState = state | |
1989 if needToUpdate: | |
1990 self.publishState() | |
1991 | |
1992 def publishState(self, target=None): | |
1993 state = self.currentBigState | |
1994 | |
1995 if target is not None: | |
1996 # unicast | |
1997 target.builderChangedState(self.name, state) | |
1998 return | |
1999 for w in self.watchers: | |
2000 try: | |
2001 w.builderChangedState(self.name, state) | |
2002 except: | |
2003 log.msg("Exception caught publishing state to %r" % w) | |
2004 log.err() | |
2005 | |
2006 def newBuild(self): | |
2007 """The Builder has decided to start a build, but the Build object is | |
2008 not yet ready to report status (it has not finished creating the | |
2009 Steps). Create a BuildStatus object that it can use.""" | |
2010 number = self.nextBuildNumber | |
2011 self.nextBuildNumber += 1 | |
2012 # TODO: self.saveYourself(), to make sure we don't forget about the | |
2013 # build number we've just allocated. This is not quite as important | |
2014 # as it was before we switch to determineNextBuildNumber, but I think | |
2015 # it may still be useful to have the new build save itself. | |
2016 s = BuildStatus(self, number) | |
2017 s.waitUntilFinished().addCallback(self._buildFinished) | |
2018 return s | |
2019 | |
2020 def addBuildRequest(self, brstatus): | |
2021 self.pendingBuilds.append(brstatus) | |
2022 for w in self.watchers: | |
2023 w.requestSubmitted(brstatus) | |
2024 | |
2025 def removeBuildRequest(self, brstatus, cancelled=False): | |
2026 self.pendingBuilds.remove(brstatus) | |
2027 if cancelled: | |
2028 for w in self.watchers: | |
2029 w.requestCancelled(self, brstatus) | |
2030 | |
2031 # buildStarted is called by our child BuildStatus instances | |
2032 def buildStarted(self, s): | |
2033 """Now the BuildStatus object is ready to go (it knows all of its | |
2034 Steps, its ETA, etc), so it is safe to notify our watchers.""" | |
2035 | |
2036 assert s.builder is self # paranoia | |
2037 assert s.number == self.nextBuildNumber - 1 | |
2038 assert s not in self.currentBuilds | |
2039 self.currentBuilds.append(s) | |
2040 self.touchBuildCache(s) | |
2041 | |
2042 # now that the BuildStatus is prepared to answer queries, we can | |
2043 # announce the new build to all our watchers | |
2044 | |
2045 for w in self.watchers: # TODO: maybe do this later? callLater(0)? | |
2046 try: | |
2047 receiver = w.buildStarted(self.getName(), s) | |
2048 if receiver: | |
2049 if type(receiver) == type(()): | |
2050 s.subscribe(receiver[0], receiver[1]) | |
2051 else: | |
2052 s.subscribe(receiver) | |
2053 d = s.waitUntilFinished() | |
2054 d.addCallback(lambda s: s.unsubscribe(receiver)) | |
2055 except: | |
2056                 log.msg("Exception caught notifying %r of buildStarted event" % w) | |
2057 log.err() | |
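
    # Rough sketch of the watcher protocol used above (hypothetical class):
    # objects handed to subscribe() may implement any of the callbacks this
    # class invokes; if buildStarted() returns a receiver (or a
    # (receiver, interval) tuple), it is additionally subscribed to that build.
    #
    #     class ExampleWatcher:
    #         def builderChangedState(self, builderName, state):
    #             pass
    #         def buildStarted(self, builderName, build):
    #             return None   # or an object to subscribe to the build
    #         def buildFinished(self, builderName, build, results):
    #             pass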
2058 | |
2059 def _buildFinished(self, s): | |
2060 assert s in self.currentBuilds | |
2061 s.saveYourself() | |
2062 self.currentBuilds.remove(s) | |
2063 | |
2064 name = self.getName() | |
2065 results = s.getResults() | |
2066 for w in self.watchers: | |
2067 try: | |
2068 w.buildFinished(name, s, results) | |
2069 except: | |
2070                 log.msg("Exception caught notifying %r of buildFinished event" % w) | |
2071 log.err() | |
2072 | |
2073 self.prune() # conserve disk | |
2074 | |
2075 | |
2076 # waterfall display (history) | |
2077 | |
2078 # I want some kind of build event that holds everything about the build: | |
2079 # why, what changes went into it, the results of the build, itemized | |
2080 # test results, etc. But, I do kind of need something to be inserted in | |
2081 # the event log first, because intermixing step events and the larger | |
2082 # build event is fraught with peril. Maybe an Event-like-thing that | |
2083 # doesn't have a file in it but does have links. Hmm, that's exactly | |
2084 # what it does now. The only difference would be that this event isn't | |
2085 # pushed to the clients. | |
2086 | |
2087 # publish to clients | |
2088 def sendLastBuildStatus(self, client): | |
2089 #client.newLastBuildStatus(self.lastBuildStatus) | |
2090 pass | |
2091 def sendCurrentActivityBigToEveryone(self): | |
2092 for s in self.subscribers: | |
2093 self.sendCurrentActivityBig(s) | |
2094 def sendCurrentActivityBig(self, client): | |
2095 state = self.currentBigState | |
2096 if state == "offline": | |
2097 client.currentlyOffline() | |
2098 elif state == "idle": | |
2099 client.currentlyIdle() | |
2100 elif state == "building": | |
2101 client.currentlyBuilding() | |
2102 else: | |
2103 log.msg("Hey, self.currentBigState is weird:", state) | |
2104 | |
2105 | |
2106 ## HTML display interface | |
2107 | |
2108 def getEventNumbered(self, num): | |
2109 # deal with dropped events, pruned events | |
2110 first = self.events[0].number | |
2111 if first + len(self.events)-1 != self.events[-1].number: | |
2112 log.msg(self, | |
2113 "lost an event somewhere: [0] is %d, [%d] is %d" % \ | |
2114 (self.events[0].number, | |
2115 len(self.events) - 1, | |
2116 self.events[-1].number)) | |
2117 for e in self.events: | |
2118 log.msg("e[%d]: " % e.number, e) | |
2119 return None | |
2120 offset = num - first | |
2121 log.msg(self, "offset", offset) | |
2122 try: | |
2123 return self.events[offset] | |
2124 except IndexError: | |
2125 return None | |
2126 | |
2127 ## Persistence of Status | |
2128 def loadYourOldEvents(self): | |
2129 if hasattr(self, "allEvents"): | |
2130 # first time, nothing to get from file. Note that this is only if | |
2131 # the Application gets .run() . If it gets .save()'ed, then the | |
2132 # .allEvents attribute goes away in the initial __getstate__ and | |
2133 # we try to load a non-existent file. | |
2134 return | |
2135 self.allEvents = self.loadFile("events", []) | |
2136 if self.allEvents: | |
2137 self.nextEventNumber = self.allEvents[-1].number + 1 | |
2138 else: | |
2139 self.nextEventNumber = 0 | |
2140 def saveYourOldEvents(self): | |
2141 self.saveFile("events", self.allEvents) | |
2142 | |
2143 ## clients | |
2144 | |
2145 def addClient(self, client): | |
2146 if client not in self.subscribers: | |
2147 self.subscribers.append(client) | |
2148 self.sendLastBuildStatus(client) | |
2149 self.sendCurrentActivityBig(client) | |
2150 client.newEvent(self.currentSmall) | |
2151 def removeClient(self, client): | |
2152 if client in self.subscribers: | |
2153 self.subscribers.remove(client) | |
2154 | |
2155 def asDict(self): | |
2156 result = {} | |
2157 # Constant | |
2158 # TODO(maruel): Fix me. We don't want to leak the full path. | |
2159 result['basedir'] = os.path.basename(self.basedir) | |
2160 result['category'] = self.category | |
2161 result['slaves'] = self.slavenames | |
2162 #result['url'] = self.parent.getURLForThing(self) | |
2163 # TODO(maruel): Add cache settings? Do we care? | |
2164 | |
2165 # Transient | |
2166 # Collect build numbers. | |
2167 # Important: Only grab the *cached* builds numbers to reduce I/O. | |
2168 current_builds = [b.getNumber() for b in self.currentBuilds] | |
2169 cached_builds = list(set(self.buildCache.keys() + current_builds)) | |
2170 cached_builds.sort() | |
2171 result['cachedBuilds'] = cached_builds | |
2172 result['currentBuilds'] = current_builds | |
2173 result['state'] = self.getState()[0] | |
2174 result['pendingBuilds'] = len(self.getPendingBuilds()) | |
2175 return result | |
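
    # Approximate shape of the dictionary returned above (hypothetical values):
    #
    #     {'basedir': 'example-builder', 'category': None, 'slaves': ['slave1'],
    #      'cachedBuilds': [41, 42], 'currentBuilds': [42],
    #      'state': 'building', 'pendingBuilds': 0}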
2176 | |
2177 | |
2178 class SlaveStatus: | |
2179 implements(interfaces.ISlaveStatus) | |
2180 | |
2181 admin = None | |
2182 host = None | |
2183 access_uri = None | |
2184 version = None | |
2185 connected = False | |
2186 graceful_shutdown = False | |
2187 | |
2188 def __init__(self, name): | |
2189 self.name = name | |
2190 self._lastMessageReceived = 0 | |
2191 self.runningBuilds = [] | |
2192 self.graceful_callbacks = [] | |
2193 | |
2194 def getName(self): | |
2195 return self.name | |
2196 def getAdmin(self): | |
2197 return self.admin | |
2198 def getHost(self): | |
2199 return self.host | |
2200 def getAccessURI(self): | |
2201 return self.access_uri | |
2202 def getVersion(self): | |
2203 return self.version | |
2204 def isConnected(self): | |
2205 return self.connected | |
2206 def lastMessageReceived(self): | |
2207 return self._lastMessageReceived | |
2208 def getRunningBuilds(self): | |
2209 return self.runningBuilds | |
2210 | |
2211 def setAdmin(self, admin): | |
2212 self.admin = admin | |
2213 def setHost(self, host): | |
2214 self.host = host | |
2215 def setAccessURI(self, access_uri): | |
2216 self.access_uri = access_uri | |
2217 def setVersion(self, version): | |
2218 self.version = version | |
2219 def setConnected(self, isConnected): | |
2220 self.connected = isConnected | |
2221 def setLastMessageReceived(self, when): | |
2222 self._lastMessageReceived = when | |
2223 | |
2224 def buildStarted(self, build): | |
2225 self.runningBuilds.append(build) | |
2226 def buildFinished(self, build): | |
2227 self.runningBuilds.remove(build) | |
2228 | |
2229 def getGraceful(self): | |
2230 """Return the graceful shutdown flag""" | |
2231 return self.graceful_shutdown | |
2232 def setGraceful(self, graceful): | |
2233 """Set the graceful shutdown flag, and notify all the watchers""" | |
2234 self.graceful_shutdown = graceful | |
2235 for cb in self.graceful_callbacks: | |
2236 reactor.callLater(0, cb, graceful) | |
2237 def addGracefulWatcher(self, watcher): | |
2238 """Add watcher to the list of watchers to be notified when the | |
2239 graceful shutdown flag is changed.""" | |
2240 if not watcher in self.graceful_callbacks: | |
2241 self.graceful_callbacks.append(watcher) | |
2242 def removeGracefulWatcher(self, watcher): | |
2243 """Remove watcher from the list of watchers to be notified when the | |
2244 graceful shutdown flag is changed.""" | |
2245 if watcher in self.graceful_callbacks: | |
2246 self.graceful_callbacks.remove(watcher) | |
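
    # Minimal usage sketch (hypothetical callback): watchers added here are
    # invoked via reactor.callLater(0, cb, graceful) whenever the flag changes.
    #
    #     def on_graceful_change(graceful):          # hypothetical function
    #         log.msg("graceful shutdown flag is now %s" % graceful)
    #     slave_status.addGracefulWatcher(on_graceful_change)
    #     slave_status.setGraceful(True)             # schedules the callback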
2247 | |
2248 def asDict(self): | |
2249 result = {} | |
2250 # Constant | |
2251 result['name'] = self.getName() | |
2252 result['access_uri'] = self.getAccessURI() | |
2253 | |
2254 # Transient (since it changes when the slave reconnects) | |
2255 result['host'] = self.getHost() | |
2256 result['admin'] = self.getAdmin() | |
2257 result['version'] = self.getVersion() | |
2258 result['connected'] = self.isConnected() | |
2259 result['runningBuilds'] = [b.asDict() for b in self.getRunningBuilds()] | |
2260 return result | |
2261 | |
2262 | |
2263 class Status: | |
2264 """ | |
2265 I represent the status of the buildmaster. | |
2266 """ | |
2267 implements(interfaces.IStatus) | |
2268 | |
2269 def __init__(self, botmaster, basedir): | |
2270 """ | |
2271 @type botmaster: L{buildbot.master.BotMaster} | |
2272 @param botmaster: the Status object uses C{.botmaster} to get at | |
2273 both the L{buildbot.master.BuildMaster} (for | |
2274 various buildbot-wide parameters) and the | |
2275 actual Builders (to get at their L{BuilderStatus} | |
2276 objects). It is not allowed to change or influence | |
2277 anything through this reference. | |
2278 @type basedir: string | |
2279 @param basedir: this provides a base directory in which saved status | |
2280 information (changes.pck, saved Build status | |
2281 pickles) can be stored | |
2282 """ | |
2283 self.botmaster = botmaster | |
2284 self.basedir = basedir | |
2285 self.watchers = [] | |
2286 self.activeBuildSets = [] | |
2287 assert os.path.isdir(basedir) | |
2288 # compress logs bigger than 4k, a good default on linux | |
2289 self.logCompressionLimit = 4*1024 | |
2290 self.logCompressionMethod = "bz2" | |
2291 # No default limit to the log size | |
2292 self.logMaxSize = None | |
2293 self.logMaxTailSize = None | |
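
    # Rough sketch of how this object is typically driven by its parent
    # (names here are hypothetical; the BuildMaster normally does this):
    #
    #     status = Status(botmaster, "/srv/buildmaster")
    #     linux = status.builderAdded("linux", "linux", category=None)
    #     status.subscribe(some_status_receiver)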
2294 | |
2295 | |
2296 # methods called by our clients | |
2297 | |
2298 def getProjectName(self): | |
2299 return self.botmaster.parent.projectName | |
2300 def getProjectURL(self): | |
2301 return self.botmaster.parent.projectURL | |
2302 def getBuildbotURL(self): | |
2303 return self.botmaster.parent.buildbotURL | |
2304 | |
2305 def getURLForThing(self, thing): | |
2306 prefix = self.getBuildbotURL() | |
2307 if not prefix: | |
2308 return None | |
2309 if interfaces.IStatus.providedBy(thing): | |
2310 return prefix | |
2311 if interfaces.ISchedulerStatus.providedBy(thing): | |
2312 pass | |
2313 if interfaces.IBuilderStatus.providedBy(thing): | |
2314 builder = thing | |
2315 return prefix + "builders/%s" % ( | |
2316 urllib.quote(builder.getName(), safe=''), | |
2317 ) | |
2318 if interfaces.IBuildStatus.providedBy(thing): | |
2319 build = thing | |
2320 builder = build.getBuilder() | |
2321 return prefix + "builders/%s/builds/%d" % ( | |
2322 urllib.quote(builder.getName(), safe=''), | |
2323 build.getNumber()) | |
2324 if interfaces.IBuildStepStatus.providedBy(thing): | |
2325 step = thing | |
2326 build = step.getBuild() | |
2327 builder = build.getBuilder() | |
2328 return prefix + "builders/%s/builds/%d/steps/%s" % ( | |
2329 urllib.quote(builder.getName(), safe=''), | |
2330 build.getNumber(), | |
2331 urllib.quote(step.getName(), safe='')) | |
2332 # IBuildSetStatus | |
2333 # IBuildRequestStatus | |
2334 # ISlaveStatus | |
2335 | |
2336 # IStatusEvent | |
2337 if interfaces.IStatusEvent.providedBy(thing): | |
2338 from buildbot.changes import changes | |
2339 # TODO: this is goofy, create IChange or something | |
2340 if isinstance(thing, changes.Change): | |
2341 change = thing | |
2342 return "%schanges/%d" % (prefix, change.number) | |
2343 | |
2344 if interfaces.IStatusLog.providedBy(thing): | |
2345 log = thing | |
2346 step = log.getStep() | |
2347 build = step.getBuild() | |
2348 builder = build.getBuilder() | |
2349 | |
2350 logs = step.getLogs() | |
2351 for i in range(len(logs)): | |
2352 if log is logs[i]: | |
2353 lognum = i | |
2354 break | |
2355 else: | |
2356 return None | |
2357 return prefix + "builders/%s/builds/%d/steps/%s/logs/%s" % ( | |
2358 urllib.quote(builder.getName(), safe=''), | |
2359 build.getNumber(), | |
2360 urllib.quote(step.getName(), safe=''), | |
2361 urllib.quote(log.getName())) | |
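
    # Examples of the URLs produced above, assuming a hypothetical
    # buildbotURL of "http://buildbot.example.org/" (builder, step and log
    # names are equally made up):
    #
    #     IBuilderStatus   -> .../builders/linux
    #     IBuildStatus     -> .../builders/linux/builds/12
    #     IBuildStepStatus -> .../builders/linux/builds/12/steps/compile
    #     IStatusLog       -> .../builders/linux/builds/12/steps/compile/logs/stdio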
2362 | |
2363 def getChangeSources(self): | |
2364 return list(self.botmaster.parent.change_svc) | |
2365 | |
2366 def getChange(self, number): | |
2367 return self.botmaster.parent.change_svc.getChangeNumbered(number) | |
2368 | |
2369 def getSchedulers(self): | |
2370 return self.botmaster.parent.allSchedulers() | |
2371 | |
2372 def getBuilderNames(self, categories=None): | |
2373         if categories is None: | |
2374 return self.botmaster.builderNames[:] # don't let them break it | |
2375 | |
2376 l = [] | |
2377 # respect addition order | |
2378 for name in self.botmaster.builderNames: | |
2379 builder = self.botmaster.builders[name] | |
2380 if builder.builder_status.category in categories: | |
2381 l.append(name) | |
2382 return l | |
2383 | |
2384 def getBuilder(self, name): | |
2385 """ | |
2386 @rtype: L{BuilderStatus} | |
2387 """ | |
2388 return self.botmaster.builders[name].builder_status | |
2389 | |
2390 def getSlaveNames(self): | |
2391 return self.botmaster.slaves.keys() | |
2392 | |
2393 def getSlave(self, slavename): | |
2394 return self.botmaster.slaves[slavename].slave_status | |
2395 | |
2396 def getBuildSets(self): | |
2397 return self.activeBuildSets[:] | |
2398 | |
2399 def generateFinishedBuilds(self, builders=[], branches=[], | |
2400 num_builds=None, finished_before=None, | |
2401 max_search=200): | |
2402 | |
2403 def want_builder(bn): | |
2404 if builders: | |
2405 return bn in builders | |
2406 return True | |
2407 builder_names = [bn | |
2408 for bn in self.getBuilderNames() | |
2409 if want_builder(bn)] | |
2410 | |
2411 # 'sources' is a list of generators, one for each Builder we're | |
2412 # using. When the generator is exhausted, it is replaced in this list | |
2413 # with None. | |
2414 sources = [] | |
2415 for bn in builder_names: | |
2416 b = self.getBuilder(bn) | |
2417 g = b.generateFinishedBuilds(branches, | |
2418 finished_before=finished_before, | |
2419 max_search=max_search) | |
2420 sources.append(g) | |
2421 | |
2422         # next_build holds the next build from each source | |
2423 next_build = [None] * len(sources) | |
2424 | |
2425 def refill(): | |
2426 for i,g in enumerate(sources): | |
2427 if next_build[i]: | |
2428 # already filled | |
2429 continue | |
2430 if not g: | |
2431 # already exhausted | |
2432 continue | |
2433 try: | |
2434 next_build[i] = g.next() | |
2435 except StopIteration: | |
2436 next_build[i] = None | |
2437 sources[i] = None | |
2438 | |
2439 got = 0 | |
2440 while True: | |
2441 refill() | |
2442 # find the latest build among all the candidates | |
2443 candidates = [(i, b, b.getTimes()[1]) | |
2444 for i,b in enumerate(next_build) | |
2445 if b is not None] | |
2446 candidates.sort(lambda x,y: cmp(x[2], y[2])) | |
2447 if not candidates: | |
2448 return | |
2449 | |
2450 # and remove it from the list | |
2451             i, build, finished_time = candidates[-1] | |
2452 next_build[i] = None | |
2453 got += 1 | |
2454 yield build | |
2455 if num_builds is not None: | |
2456 if got >= num_builds: | |
2457 return | |
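
    # This amounts to a k-way merge: every per-builder generator yields its
    # builds newest-first, and each pass of the loop above emits the candidate
    # with the latest finish time. Rough usage sketch (hypothetical names):
    #
    #     for b in status.generateFinishedBuilds(builders=["linux", "win32"],
    #                                            num_builds=10):
    #         print b.getBuilder().getName(), b.getNumber()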
2458 | |
2459 def subscribe(self, target): | |
2460 self.watchers.append(target) | |
2461 for name in self.botmaster.builderNames: | |
2462 self.announceNewBuilder(target, name, self.getBuilder(name)) | |
2463 def unsubscribe(self, target): | |
2464 self.watchers.remove(target) | |
2465 | |
2466 | |
2467 # methods called by upstream objects | |
2468 | |
2469 def announceNewBuilder(self, target, name, builder_status): | |
2470 t = target.builderAdded(name, builder_status) | |
2471 if t: | |
2472 builder_status.subscribe(t) | |
2473 | |
2474 def builderAdded(self, name, basedir, category=None): | |
2475 """ | |
2476 @rtype: L{BuilderStatus} | |
2477 """ | |
2478 filename = os.path.join(self.basedir, basedir, "builder") | |
2479 log.msg("trying to load status pickle from %s" % filename) | |
2480 builder_status = None | |
2481 try: | |
2482 builder_status = load(open(filename, "rb")) | |
2483 styles.doUpgrade() | |
2484 except IOError: | |
2485 log.msg("no saved status pickle, creating a new one") | |
2486 except: | |
2487 log.msg("error while loading status pickle, creating a new one") | |
2488 log.msg("error follows:") | |
2489 log.err() | |
2490 if not builder_status: | |
2491 builder_status = BuilderStatus(name, category) | |
2492 builder_status.addPointEvent(["builder", "created"]) | |
2493 log.msg("added builder %s in category %s" % (name, category)) | |
2494 # an unpickled object might not have category set from before, | |
2495 # so set it here to make sure | |
2496 builder_status.category = category | |
2497 builder_status.basedir = os.path.join(self.basedir, basedir) | |
2498 builder_status.name = name # it might have been updated | |
2499 builder_status.status = self | |
2500 | |
2501 if not os.path.isdir(builder_status.basedir): | |
2502 os.makedirs(builder_status.basedir) | |
2503 builder_status.determineNextBuildNumber() | |
2504 | |
2505 builder_status.setBigState("offline") | |
2506 builder_status.setLogCompressionLimit(self.logCompressionLimit) | |
2507 builder_status.setLogCompressionMethod(self.logCompressionMethod) | |
2508 builder_status.setLogMaxSize(self.logMaxSize) | |
2509 builder_status.setLogMaxTailSize(self.logMaxTailSize) | |
2510 | |
2511 for t in self.watchers: | |
2512 self.announceNewBuilder(t, name, builder_status) | |
2513 | |
2514 return builder_status | |
2515 | |
2516 def builderRemoved(self, name): | |
2517 for t in self.watchers: | |
2518 t.builderRemoved(name) | |
2519 | |
2520 def slaveConnected(self, name): | |
2521 for t in self.watchers: | |
2522 t.slaveConnected(name) | |
2523 | |
2524 def slaveDisconnected(self, name): | |
2525 for t in self.watchers: | |
2526 t.slaveDisconnected(name) | |
2527 | |
2528 def buildsetSubmitted(self, bss): | |
2529 self.activeBuildSets.append(bss) | |
2530 bss.waitUntilFinished().addCallback(self.activeBuildSets.remove) | |
2531 for t in self.watchers: | |
2532 t.buildsetSubmitted(bss) | |
2533 | |
2534 def changeAdded(self, change): | |
2535 for t in self.watchers: | |
2536 t.changeAdded(change) | |
2537 | |
2538 def asDict(self): | |
2539 result = {} | |
2540 # Constant | |
2541 result['projectName'] = self.getProjectName() | |
2542 result['projectURL'] = self.getProjectURL() | |
2543 result['buildbotURL'] = self.getBuildbotURL() | |
2544 # TODO: self.getSchedulers() | |
2545 # self.getChangeSources() | |
2546 return result | |
2547 | |
2548 # vim: set ts=4 sts=4 sw=4 et: | |