# -*- test-case-name: buildbot.test.test_scheduler,buildbot.test.test_vc -*-

import sys, os, re, time, random
from twisted.internet import utils, protocol, defer, reactor, task
from twisted.spread import pb
from twisted.cred import credentials
from twisted.python import log
from twisted.python.procutils import which

from buildbot.sourcestamp import SourceStamp
from buildbot.scripts import runner
from buildbot.util import now
from buildbot.status import builder

class SourceStampExtractor:
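    """Base class for the version-control-specific extractors below.

    Subclasses set 'vcexe' (the command to run) and 'patchlevel' (the -p
    level needed to apply the resulting diff), and implement
    getBaseRevision() and getPatch(); get() chains them together and fires
    with a SourceStamp describing the local changes.
    """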

    def __init__(self, treetop, branch):
        self.treetop = treetop
        self.branch = branch
        self.exe = which(self.vcexe)[0]

    def dovc(self, cmd):
        """This accepts the arguments of a command, without the actual
        command itself."""
        env = os.environ.copy()
        env['LC_ALL'] = "C"
        d = utils.getProcessOutputAndValue(self.exe, cmd, env=env,
                                           path=self.treetop)
        d.addCallback(self._didvc, cmd)
        return d
    def _didvc(self, res, cmd):
        (stdout, stderr, code) = res
        # 'bzr diff' sets rc=1 if there were any differences. tla, baz, and
        # cvs do something similar, so don't bother requiring rc=0.
        return stdout

    def get(self):
        """Return a Deferred that fires with a SourceStamp instance."""
        d = self.getBaseRevision()
        d.addCallback(self.getPatch)
        d.addCallback(self.done)
        return d
    def readPatch(self, res, patchlevel):
        self.patch = (patchlevel, res)
    def done(self, res):
        # TODO: figure out the branch too
        ss = SourceStamp(self.branch, self.baserev, self.patch)
        return ss

class CVSExtractor(SourceStampExtractor):
    patchlevel = 0
    vcexe = "cvs"
    def getBaseRevision(self):
        # this depends upon our local clock and the repository's clock being
        # reasonably synchronized with each other. We express everything in
        # UTC because the '%z' format specifier for strftime doesn't always
        # work.
        self.baserev = time.strftime("%Y-%m-%d %H:%M:%S +0000",
                                     time.gmtime(now()))
        return defer.succeed(None)

    def getPatch(self, res):
        # the -q tells CVS to not announce each directory as it works
        if self.branch is not None:
            # 'cvs diff' won't take both -r and -D at the same time (it
            # ignores the -r). As best I can tell, there is no way to make
            # cvs give you a diff relative to a timestamp on the non-trunk
            # branch. A bare 'cvs diff' will tell you about the changes
            # relative to your checked-out versions, but I know of no way to
            # find out what those checked-out versions are.
            raise RuntimeError("Sorry, CVS 'try' builds don't work with "
                               "branches")
        args = ['-q', 'diff', '-u', '-D', self.baserev]
        d = self.dovc(args)
        d.addCallback(self.readPatch, self.patchlevel)
        return d

class SVNExtractor(SourceStampExtractor):
    patchlevel = 0
    vcexe = "svn"

    def getBaseRevision(self):
        d = self.dovc(["status", "-u"])
        d.addCallback(self.parseStatus)
        return d
    def parseStatus(self, res):
        # svn shows the base revision for each file that has been modified or
        # which needs an update. You can update each file to a different
        # version, so each file is displayed with its individual base
        # revision. It also shows the repository-wide latest revision number
        # on the last line ("Status against revision: \d+").

        # for our purposes, we use the latest revision number as the "base"
        # revision, and get a diff against that. This means we will get
        # reverse-diffs for local files that need updating, but the resulting
        # tree will still be correct. The only weirdness is that the baserev
        # that we emit may be different than the version of the tree that we
        # first checked out.

        # to do this differently would probably involve scanning the revision
        # numbers to find the max (or perhaps the min) revision, and then
        # using that as a base.

        for line in res.split("\n"):
            m = re.search(r'^Status against revision:\s+(\d+)', line)
            if m:
                self.baserev = int(m.group(1))
                return
        raise IndexError("Could not find 'Status against revision' in "
                         "SVN output: %s" % res)
    def getPatch(self, res):
        d = self.dovc(["diff", "-r%d" % self.baserev])
        d.addCallback(self.readPatch, self.patchlevel)
        return d

class BazExtractor(SourceStampExtractor):
    patchlevel = 1
    vcexe = "baz"
    def getBaseRevision(self):
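        # Presumably 'baz tree-id' prints ARCHIVE/BRANCH--REVISION, the same
        # shape that 'tla logs --full' prints below; parseStatus splits the
        # branch and revision around the last '--'.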
        d = self.dovc(["tree-id"])
        d.addCallback(self.parseStatus)
        return d
    def parseStatus(self, res):
        tid = res.strip()
        slash = tid.index("/")
        dd = tid.rindex("--")
        self.branch = tid[slash+1:dd]
        self.baserev = tid[dd+2:]
    def getPatch(self, res):
        d = self.dovc(["diff"])
        d.addCallback(self.readPatch, self.patchlevel)
        return d

class TlaExtractor(SourceStampExtractor):
    patchlevel = 1
    vcexe = "tla"
    def getBaseRevision(self):
        # 'tla logs --full' gives us ARCHIVE/BRANCH--REVISION
        # 'tla logs' gives us REVISION
        d = self.dovc(["logs", "--full", "--reverse"])
        d.addCallback(self.parseStatus)
        return d
    def parseStatus(self, res):
        tid = res.split("\n")[0].strip()
        slash = tid.index("/")
        dd = tid.rindex("--")
        self.branch = tid[slash+1:dd]
        self.baserev = tid[dd+2:]

    def getPatch(self, res):
        d = self.dovc(["changes", "--diffs"])
        d.addCallback(self.readPatch, self.patchlevel)
        return d

class BzrExtractor(SourceStampExtractor):
    patchlevel = 0
    vcexe = "bzr"
    def getBaseRevision(self):
        d = self.dovc(["revision-info", "-rsubmit:"])
        d.addCallback(self.get_revision_number)
        return d

    def get_revision_number(self, out):
        revno, revid = out.split()
        self.baserev = 'revid:' + revid
        return

    def getPatch(self, res):
        d = self.dovc(["diff", "-r%s.." % self.baserev])
        d.addCallback(self.readPatch, self.patchlevel)
        return d

class MercurialExtractor(SourceStampExtractor):
    patchlevel = 1
    vcexe = "hg"
    def getBaseRevision(self):
        d = self.dovc(["identify"])
        d.addCallback(self.parseStatus)
        return d
    def parseStatus(self, output):
        m = re.search(r'^(\w+)', output)
        self.baserev = m.group(0)
    def getPatch(self, res):
        d = self.dovc(["diff"])
        d.addCallback(self.readPatch, self.patchlevel)
        return d

class DarcsExtractor(SourceStampExtractor):
    patchlevel = 1
    vcexe = "darcs"
    def getBaseRevision(self):
        d = self.dovc(["changes", "--context"])
        d.addCallback(self.parseStatus)
        return d
    def parseStatus(self, res):
        self.baserev = res # the whole context file
    def getPatch(self, res):
        d = self.dovc(["diff", "-u"])
        d.addCallback(self.readPatch, self.patchlevel)
        return d

class GitExtractor(SourceStampExtractor):
    patchlevel = 1
    vcexe = "git"

    def getBaseRevision(self):
        d = self.dovc(["branch", "--no-color", "-v", "--no-abbrev"])
        d.addCallback(self.parseStatus)
        return d

    def readConfig(self):
        d = self.dovc(["config", "-l"])
        d.addCallback(self.parseConfig)
        return d

    def parseConfig(self, res):
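        # 'git config -l' prints one key=value pair per line, e.g.
        # "branch.master.remote=origin" and
        # "branch.master.merge=refs/heads/master".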
        git_config = {}
        for l in res.split("\n"):
            if l.strip():
                # split on the first '=' only, so values containing '='
                # are preserved intact
                parts = l.strip().split("=", 1)
                git_config[parts[0]] = parts[1]

        # If we're tracking a remote, consider that the base.
        remote = git_config.get("branch." + self.branch + ".remote")
        ref = git_config.get("branch." + self.branch + ".merge")
        if remote and ref:
            remote_branch = ref.split("/", 3)[-1]
            d = self.dovc(["rev-parse", remote + "/" + remote_branch])
            d.addCallback(self.override_baserev)
            return d

    def override_baserev(self, res):
        self.baserev = res.strip()

    def parseStatus(self, res):
        # The current branch is marked by '*' at the start of the
        # line, followed by the branch name and the SHA1.
        #
        # Branch names may contain pretty much anything but whitespace.
        m = re.search(r'^\* (\S+)\s+([0-9a-f]{40})', res, re.MULTILINE)
        if m:
            self.baserev = m.group(2)
            # If a branch is specified, parse out the rev it points to
            # and extract the local name (assuming it has a slash).
            # This may break if someone specifies the name of a local
            # branch that has a slash in it and has no corresponding
            # remote branch (or something similarly contrived).
            if self.branch:
                d = self.dovc(["rev-parse", self.branch])
                if '/' in self.branch:
                    self.branch = self.branch.split('/', 1)[1]
                d.addCallback(self.override_baserev)
                return d
            else:
                self.branch = m.group(1)
                return self.readConfig()
        raise IndexError("Could not find current GIT branch: %s" % res)

    def getPatch(self, res):
        d = self.dovc(["diff", self.baserev])
        d.addCallback(self.readPatch, self.patchlevel)
        return d

def getSourceStamp(vctype, treetop, branch=None):
    if vctype == "cvs":
        e = CVSExtractor(treetop, branch)
    elif vctype == "svn":
        e = SVNExtractor(treetop, branch)
    elif vctype == "baz":
        e = BazExtractor(treetop, branch)
    elif vctype == "bzr":
        e = BzrExtractor(treetop, branch)
    elif vctype == "tla":
        e = TlaExtractor(treetop, branch)
    elif vctype == "hg":
        e = MercurialExtractor(treetop, branch)
    elif vctype == "darcs":
        e = DarcsExtractor(treetop, branch)
    elif vctype == "git":
        e = GitExtractor(treetop, branch)
    else:
        raise KeyError("unknown vctype '%s'" % vctype)
    return e.get()

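# Illustrative use (hypothetical tree path): getSourceStamp("svn", "~/src/proj")
# returns a Deferred that fires with a SourceStamp capturing the local
# changes as a diff against the tree's base revision.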

def ns(s):
    return "%d:%s," % (len(s), s)

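# A jobfile is a flat run of netstrings: ns("hello") encodes to "5:hello,".
# createJobfile() emits, in order, the format version ("1"), the buildset ID,
# the branch, the base revision, the patchlevel, the diff text, and then one
# entry per requested builder name.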
def createJobfile(bsid, branch, baserev, patchlevel, diff, builderNames):
    job = ""
    job += ns("1")
    job += ns(bsid)
    job += ns(branch)
    job += ns(str(baserev))
    job += ns("%d" % patchlevel)
    job += ns(diff)
    for bn in builderNames:
        job += ns(bn)
    return job

def getTopdir(topfile, start=None):
    """walk upwards from the current directory until we find this topfile"""
    if not start:
        start = os.getcwd()
    here = start
    toomany = 20
    while toomany > 0:
        if os.path.exists(os.path.join(here, topfile)):
            return here
        next = os.path.dirname(here)
        if next == here:
            break # we've hit the root
        here = next
        toomany -= 1
    raise ValueError("Unable to find topfile '%s' anywhere from %s upwards"
                     % (topfile, start))

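# RemoteTryPP feeds the serialized jobfile to the stdin of a spawned
# 'ssh ... buildbot tryserver' process (see deliverJob below) and fires its
# Deferred when that process exits.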
class RemoteTryPP(protocol.ProcessProtocol):
    def __init__(self, job):
        self.job = job
        self.d = defer.Deferred()
    def connectionMade(self):
        self.transport.write(self.job)
        self.transport.closeStdin()
    def outReceived(self, data):
        sys.stdout.write(data)
    def errReceived(self, data):
        sys.stderr.write(data)
    def processEnded(self, status_object):
        sig = status_object.value.signal
        rc = status_object.value.exitCode
        if sig is not None or rc != 0:
            self.d.errback(RuntimeError("remote 'buildbot tryserver' failed"
                                        ": sig=%s, rc=%s" % (sig, rc)))
            return
        self.d.callback((sig, rc))

class BuildSetStatusGrabber:
    retryCount = 5 # how many times do we try to grab the BuildSetStatus?
    retryDelay = 3 # seconds to wait between attempts

    def __init__(self, status, bsid):
        self.status = status
        self.bsid = bsid

    def grab(self):
        # return a Deferred that either fires with the BuildSetStatus
        # reference or errbacks because we were unable to grab it
        self.d = defer.Deferred()
        # wait a second before querying to give the master's maildir watcher
        # a chance to see the job
        reactor.callLater(1, self.go)
        return self.d

    def go(self, dummy=None):
        if self.retryCount == 0:
            raise RuntimeError("couldn't find matching buildset")
        self.retryCount -= 1
        d = self.status.callRemote("getBuildSets")
        d.addCallback(self._gotSets)

    def _gotSets(self, buildsets):
        for bs, bsid in buildsets:
            if bsid == self.bsid:
                # got it
                self.d.callback(bs)
                return
        d = defer.Deferred()
        d.addCallback(self.go)
        reactor.callLater(self.retryDelay, d.callback, None)


class Try(pb.Referenceable):
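    """Client side of a 'buildbot try' request: build the job from the local
    tree (or from a supplied diff), deliver it over ssh or PB, and optionally
    wait for the resulting builds and report their status."""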
    buildsetStatus = None
    quiet = False

    def __init__(self, config):
        self.config = config
        self.connect = self.getopt('connect')
        assert self.connect, "you must specify a connect style: ssh or pb"
        self.builderNames = self.getopt('builders')

    def getopt(self, config_name, default=None):
        value = self.config.get(config_name)
        if value is None or value == []:
            value = default
        return value

    def createJob(self):
        # returns a Deferred which fires when the job parameters have been
        # created

        # generate a random (unique) string. It would make sense to add a
        # hostname and process ID here, but a) I suspect that would cause
        # windows portability problems, and b) really this is good enough
        self.bsid = "%d-%s" % (time.time(), random.randint(0, 1000000))

        # common options
        branch = self.getopt("branch")

        difffile = self.config.get("diff")
        if difffile:
            baserev = self.config.get("baserev")
            if difffile == "-":
                diff = sys.stdin.read()
            else:
                diff = open(difffile, "r").read()
            patch = (self.config['patchlevel'], diff)
            ss = SourceStamp(branch, baserev, patch)
            d = defer.succeed(ss)
        else:
            vc = self.getopt("vc")
            if vc in ("cvs", "svn"):
                # we need to find the tree-top
                topdir = self.getopt("try-topdir")
                if topdir:
                    treedir = os.path.expanduser(topdir)
                else:
                    topfile = self.getopt("try-topfile")
                    treedir = getTopdir(topfile)
            else:
                treedir = os.getcwd()
            d = getSourceStamp(vc, treedir, branch)
        d.addCallback(self._createJob_1)
        return d

    def _createJob_1(self, ss):
        self.sourcestamp = ss
        if self.connect == "ssh":
            patchlevel, diff = ss.patch
            revspec = ss.revision
            if revspec is None:
                revspec = ""
            self.jobfile = createJobfile(self.bsid,
                                         ss.branch or "", revspec,
                                         patchlevel, diff,
                                         self.builderNames)

    def fakeDeliverJob(self):
        # Display the job to be delivered, but don't perform delivery.
        ss = self.sourcestamp
        print ("Job:\n\tBranch: %s\n\tRevision: %s\n\tBuilders: %s\n%s"
               % (ss.branch,
                  ss.revision,
                  self.builderNames,
                  ss.patch[1]))
        d = defer.Deferred()
        d.callback(True)
        return d

    def deliverJob(self):
        # returns a Deferred that fires when the job has been delivered

        if self.connect == "ssh":
            tryhost = self.getopt("tryhost")
            tryuser = self.getopt("username")
            trydir = self.getopt("trydir")

            argv = ["ssh", "-l", tryuser, tryhost,
                    "buildbot", "tryserver", "--jobdir", trydir]
            # now run this command and feed the contents of 'job' into stdin

            pp = RemoteTryPP(self.jobfile)
            p = reactor.spawnProcess(pp, argv[0], argv, os.environ)
            d = pp.d
            return d
        if self.connect == "pb":
            user = self.getopt("username")
            passwd = self.getopt("passwd")
            master = self.getopt("master")
            tryhost, tryport = master.split(":")
            tryport = int(tryport)
            f = pb.PBClientFactory()
            d = f.login(credentials.UsernamePassword(user, passwd))
            reactor.connectTCP(tryhost, tryport, f)
            d.addCallback(self._deliverJob_pb)
            return d
        raise RuntimeError("unknown connecttype '%s', should be 'ssh' or 'pb'"
                           % self.connect)

    def _deliverJob_pb(self, remote):
        ss = self.sourcestamp

        d = remote.callRemote("try",
                              ss.branch,
                              ss.revision,
                              ss.patch,
                              self.builderNames,
                              self.config.get('properties', {}))
        d.addCallback(self._deliverJob_pb2)
        return d
    def _deliverJob_pb2(self, status):
        self.buildsetStatus = status
        return status

    def getStatus(self):
        # returns a Deferred that fires when the builds have finished, and
        # may emit status messages while we wait
        wait = bool(self.getopt("wait", False))
        if not wait:
            # TODO: emit the URL where they can follow the builds. This
            # requires contacting the Status server over PB and doing
            # getURLForThing() on the BuildSetStatus. To get URLs for
            # individual builds would require we wait for the builds to
            # start.
            print "not waiting for builds to finish"
            return
        d = self.running = defer.Deferred()
        if self.buildsetStatus:
            self._getStatus_1()
            return self.running
        # contact the status port
        # we're probably using the ssh style
        master = self.getopt("master")
        host, port = master.split(":")
        port = int(port)
        self.announce("contacting the status port at %s:%d" % (host, port))
        f = pb.PBClientFactory()
        creds = credentials.UsernamePassword("statusClient", "clientpw")
        d = f.login(creds)
        reactor.connectTCP(host, port, f)
        d.addCallback(self._getStatus_ssh_1)
        return self.running

    def _getStatus_ssh_1(self, remote):
        # find a remotereference to the corresponding BuildSetStatus object
        self.announce("waiting for job to be accepted")
        g = BuildSetStatusGrabber(remote, self.bsid)
        d = g.grab()
        d.addCallback(self._getStatus_1)
        return d

    def _getStatus_1(self, res=None):
        if res:
            self.buildsetStatus = res
        # gather the set of BuildRequests
        d = self.buildsetStatus.callRemote("getBuildRequests")
        d.addCallback(self._getStatus_2)

    def _getStatus_2(self, brs):
        self.builderNames = []
        self.buildRequests = {}

        # self.builds holds the current BuildStatus object for each builder
        self.builds = {}

        # self.outstanding holds the list of builderNames which haven't
        # finished yet
        self.outstanding = []

        # self.results maps each builder name to its build result, stored as
        # a [result, text] pair
        self.results = {}

        # self.currentStep holds the name of the Step that each build is
        # currently running
        self.currentStep = {}

        # self.ETA holds the expected finishing time (absolute time since
        # epoch)
        self.ETA = {}

        for n, br in brs:
            self.builderNames.append(n)
            self.buildRequests[n] = br
            self.builds[n] = None
            self.outstanding.append(n)
            self.results[n] = [None, None]
            self.currentStep[n] = None
            self.ETA[n] = None
            # get new Builds for this buildrequest. We follow each one until
            # it finishes or is interrupted.
            br.callRemote("subscribe", self)

        # now that those queries are in transit, we can start the
        # periodic display-status loop
        self.printloop = task.LoopingCall(self.printStatus)
        self.printloop.start(3, now=False)


    # these methods are invoked by the status objects we've subscribed to

    def remote_newbuild(self, bs, builderName):
        if self.builds[builderName]:
            self.builds[builderName].callRemote("unsubscribe", self)
        self.builds[builderName] = bs
        bs.callRemote("subscribe", self, 20)
        d = bs.callRemote("waitUntilFinished")
        d.addCallback(self._build_finished, builderName)

    def remote_stepStarted(self, buildername, build, stepname, step):
        self.currentStep[buildername] = stepname

    def remote_stepFinished(self, buildername, build, stepname, step, results):
        pass

    def remote_buildETAUpdate(self, buildername, build, eta):
        self.ETA[buildername] = now() + eta

    def _build_finished(self, bs, builderName):
        # we need to collect status from the newly-finished build. We don't
        # remove the build from self.outstanding until we've collected
        # everything we want.
        self.builds[builderName] = None
        self.ETA[builderName] = None
        self.currentStep[builderName] = "finished"
        d = bs.callRemote("getResults")
        d.addCallback(self._build_finished_2, bs, builderName)
        return d
    def _build_finished_2(self, results, bs, builderName):
        self.results[builderName][0] = results
        d = bs.callRemote("getText")
        d.addCallback(self._build_finished_3, builderName)
        return d
    def _build_finished_3(self, text, builderName):
        self.results[builderName][1] = text

        self.outstanding.remove(builderName)
        if not self.outstanding:
            # all done
            return self.statusDone()

    def printStatus(self):
        names = self.buildRequests.keys()
        names.sort()
        for n in names:
            if n not in self.outstanding:
                # the build is finished, and we have results
                code, text = self.results[n]
                t = builder.Results[code]
                if text:
                    t += " (%s)" % " ".join(text)
            elif self.builds[n]:
                t = self.currentStep[n] or "building"
                if self.ETA[n]:
                    t += " [ETA %ds]" % (self.ETA[n] - now())
            else:
                t = "no build"
            self.announce("%s: %s" % (n, t))
        self.announce("")

    def statusDone(self):
        self.printloop.stop()
        print "All Builds Complete"
        # TODO: include a URL for all failing builds
        names = self.buildRequests.keys()
        names.sort()
        happy = True
        for n in names:
            code, text = self.results[n]
            t = "%s: %s" % (n, builder.Results[code])
            if text:
                t += " (%s)" % " ".join(text)
            print t
            if code != builder.SUCCESS:
                happy = False

        if happy:
            self.exitcode = 0
        else:
            self.exitcode = 1
        self.running.callback(self.exitcode)

    def announce(self, message):
        if not self.quiet:
            print message

    def run(self):
        # we can't do spawnProcess until we're inside reactor.run(), so get
        # funky
        print "using '%s' connect method" % self.connect
        self.exitcode = 0
        d = defer.Deferred()
        d.addCallback(lambda res: self.createJob())
        d.addCallback(lambda res: self.announce("job created"))
        deliver = self.deliverJob
        if bool(self.config.get("dryrun")):
            deliver = self.fakeDeliverJob
        d.addCallback(lambda res: deliver())
        d.addCallback(lambda res: self.announce("job has been delivered"))
        d.addCallback(lambda res: self.getStatus())
        d.addErrback(log.err)
        d.addCallback(self.cleanup)
        d.addCallback(lambda res: reactor.stop())

        reactor.callLater(0, d.callback, None)
        reactor.run()
        sys.exit(self.exitcode)

    def logErr(self, why):
        log.err(why)
        print "error during 'try' processing"
        print why

    def cleanup(self, res=None):
        if self.buildsetStatus:
            self.buildsetStatus.broker.transport.loseConnection()