Chromium Code Reviews

Side by Side Diff: third_party/buildbot_7_12/buildbot/slave/commands.py

Issue 12207158: Bye bye buildbot 0.7.12. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/tools/build
Patch Set: Created 7 years, 10 months ago
New file: (Empty) - this change deletes the file; the removed contents are shown below.
1 # -*- test-case-name: buildbot.test.test_slavecommand -*-
2
3 import os, sys, re, signal, shutil, types, time, tarfile, tempfile
4 from stat import ST_CTIME, ST_MTIME, ST_SIZE
5 from xml.dom.minidom import parseString
6
7 from zope.interface import implements
8 from twisted.internet.protocol import ProcessProtocol
9 from twisted.internet import reactor, defer, task
10 from twisted.python import log, failure, runtime
11 from twisted.python.procutils import which
12
13 from buildbot.slave.interfaces import ISlaveCommand
14 from buildbot.slave.registry import registerSlaveCommand
15 from buildbot.util import to_text, remove_userpassword
16
17 # this used to be a CVS $-style "Revision" auto-updated keyword, but since I
18 # moved to Darcs as the primary repository, this is updated manually each
19 # time this file is changed. The last cvs_ver that was here was 1.51 .
20 command_version = "2.9"
21
22 # version history:
23 # >=1.17: commands are interruptable
24 # >=1.28: Arch understands 'revision', added Bazaar
25 # >=1.33: Source classes understand 'retry'
26 # >=1.39: Source classes correctly handle changes in branch (except Git)
27 # Darcs accepts 'revision' (now all do but Git) (well, and P4Sync)
28 # Arch/Baz should accept 'build-config'
29 # >=1.51: (release 0.7.3)
30 # >= 2.1: SlaveShellCommand now accepts 'initial_stdin', 'keep_stdin_open',
31 # and 'logfiles'. It now sends 'log' messages in addition to
32 # stdout/stderr/header/rc. It acquired writeStdin/closeStdin methods,
33 # but these are not remotely callable yet.
34 # (not externally visible: ShellCommandPP has writeStdin/closeStdin.
35 # ShellCommand accepts new arguments (logfiles=, initialStdin=,
36 # keepStdinOpen=) and no longer accepts stdin=)
37 # (release 0.7.4)
38 # >= 2.2: added monotone, uploadFile, and downloadFile (release 0.7.5)
39 # >= 2.3: added bzr (release 0.7.6)
40 # >= 2.4: Git understands 'revision' and branches
41 # >= 2.5: workaround added for remote 'hg clone --rev REV' when hg<0.9.2
42 # >= 2.6: added uploadDirectory
43 # >= 2.7: added usePTY option to SlaveShellCommand
44 # >= 2.8: added username and password args to SVN class
45
46 class CommandInterrupted(Exception):
47 pass
48 class TimeoutError(Exception):
49 pass
50
51 class Obfuscated:
52 """An obfuscated string in a command"""
53 def __init__(self, real, fake):
54 self.real = real
55 self.fake = fake
56
57 def __str__(self):
58 return self.fake
59
60 def __repr__(self):
61 return `self.fake`
62
63 def get_real(command):
64 rv = command
65 if type(command) == types.ListType:
66 rv = []
67 for elt in command:
68 if isinstance(elt, Obfuscated):
69 rv.append(elt.real)
70 else:
71 rv.append(to_text(elt))
72 return rv
73 get_real = staticmethod(get_real)
74
75 def get_fake(command):
76 rv = command
77 if type(command) == types.ListType:
78 rv = []
79 for elt in command:
80 if isinstance(elt, Obfuscated):
81 rv.append(elt.fake)
82 else:
83 rv.append(to_text(elt))
84 return rv
85 get_fake = staticmethod(get_fake)
86
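# A minimal usage sketch (not part of the original module) of Obfuscated: the
# real value is what gets executed, while logs and status headers only ever
# see the fake value (ShellCommand below stores them as self.command and
# self.fake_command). The command and password here are hypothetical.
_example_cmd = ["svn", "co", "--password", Obfuscated("s3cret", "XXXXXXXX")]
assert Obfuscated.get_real(_example_cmd)[-1] == "s3cret"    # runs with this
assert Obfuscated.get_fake(_example_cmd)[-1] == "XXXXXXXX"  # logged as this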
87 class AbandonChain(Exception):
88 """A series of chained steps can raise this exception to indicate that
89 one of the intermediate ShellCommands has failed, such that there is no
90 point in running the remainder. 'rc' should be the non-zero exit code of
91 the failing ShellCommand."""
92
93 def __repr__(self):
94 return "<AbandonChain rc=%s>" % self.args[0]
95
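# A brief sketch (not part of the original module) of how a chained step uses
# AbandonChain: a callback inspects the exit code of the previous ShellCommand
# and aborts the rest of the chain on failure, much as Command._abandonOnFailure
# does further below.
def _example_abandon_on_failure(rc):
    if rc != 0:
        raise AbandonChain(rc)   # a later errback traps this and reports rc
    return rc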
96 def getCommand(name):
97 possibles = which(name)
98 if not possibles:
99 raise RuntimeError("Couldn't find executable for '%s'" % name)
100 return possibles[0]
101
102 def rmdirRecursive(dir):
103 """This is a replacement for shutil.rmtree that works better under
104 windows. Thanks to Bear at the OSAF for the code."""
105 if not os.path.exists(dir):
106 return
107
108 if os.path.islink(dir):
109 os.remove(dir)
110 return
111
112 # Verify the directory is read/write/execute for the current user
113 os.chmod(dir, 0700)
114
115 for name in os.listdir(dir):
116 full_name = os.path.join(dir, name)
117 # on Windows, if we don't have write permission we can't remove
118 # the file/directory either, so turn that on
119 if os.name == 'nt':
120 if not os.access(full_name, os.W_OK):
121 # I think this is now redundant, but I don't have an NT
122 # machine to test on, so I'm going to leave it in place
123 # -warner
124 os.chmod(full_name, 0600)
125
126 if os.path.isdir(full_name):
127 rmdirRecursive(full_name)
128 else:
129 if os.path.isfile(full_name):
130 os.chmod(full_name, 0700)
131 os.remove(full_name)
132 os.rmdir(dir)
133
134 class ShellCommandPP(ProcessProtocol):
135 debug = False
136
137 def __init__(self, command):
138 self.command = command
139 self.pending_stdin = ""
140 self.stdin_finished = False
141
142 def writeStdin(self, data):
143 assert not self.stdin_finished
144 if self.connected:
145 self.transport.write(data)
146 else:
147 self.pending_stdin += data
148
149 def closeStdin(self):
150 if self.connected:
151 if self.debug: log.msg(" closing stdin")
152 self.transport.closeStdin()
153 self.stdin_finished = True
154
155 def connectionMade(self):
156 if self.debug:
157 log.msg("ShellCommandPP.connectionMade")
158 if not self.command.process:
159 if self.debug:
160 log.msg(" assigning self.command.process: %s" %
161 (self.transport,))
162 self.command.process = self.transport
163
164 # TODO: maybe we shouldn't close stdin when using a PTY. I can't test
165 # this yet, recent debian glibc has a bug which causes thread-using
166 # test cases to SIGHUP trial, and the workaround is to either run
167 # the whole test with /bin/sh -c " ".join(argv) (way gross) or to
168 # not use a PTY. Once the bug is fixed, I'll be able to test what
169 # happens when you close stdin on a pty. My concern is that it will
170 # SIGHUP the child (since we are, in a sense, hanging up on them).
171 # But it may well be that keeping stdout open prevents the SIGHUP
172 # from being sent.
173 #if not self.command.usePTY:
174
175 if self.pending_stdin:
176 if self.debug: log.msg(" writing to stdin")
177 self.transport.write(self.pending_stdin)
178 if self.stdin_finished:
179 if self.debug: log.msg(" closing stdin")
180 self.transport.closeStdin()
181
182 def outReceived(self, data):
183 if self.debug:
184 log.msg("ShellCommandPP.outReceived")
185 self.command.addStdout(data)
186
187 def errReceived(self, data):
188 if self.debug:
189 log.msg("ShellCommandPP.errReceived")
190 self.command.addStderr(data)
191
192 def processEnded(self, status_object):
193 if self.debug:
194 log.msg("ShellCommandPP.processEnded", status_object)
195 # status_object is a Failure wrapped around an
196 # error.ProcessTerminated or an error.ProcessDone.
197 # requires twisted >= 1.0.4 to overcome a bug in process.py
198 sig = status_object.value.signal
199 rc = status_object.value.exitCode
200 self.command.finished(sig, rc)
201
202 class LogFileWatcher:
203 POLL_INTERVAL = 2
204
205 def __init__(self, command, name, logfile, follow=False):
206 self.command = command
207 self.name = name
208 self.logfile = logfile
209
210 log.msg("LogFileWatcher created to watch %s" % logfile)
211 # we are created before the ShellCommand starts. If the logfile we're
212 # supposed to be watching already exists, record its size and
213 # ctime/mtime so we can tell when it starts to change.
214 self.old_logfile_stats = self.statFile()
215 self.started = False
216
217 # follow the file, only sending back lines
218 # added since we started watching
219 self.follow = follow
220
221 # every 2 seconds we check on the file again
222 self.poller = task.LoopingCall(self.poll)
223
224 def start(self):
225 self.poller.start(self.POLL_INTERVAL).addErrback(self._cleanupPoll)
226
227 def _cleanupPoll(self, err):
228 log.err(err, msg="Polling error")
229 self.poller = None
230
231 def stop(self):
232 self.poll()
233 if self.poller is not None:
234 self.poller.stop()
235 if self.started:
236 self.f.close()
237
238 def statFile(self):
239 if os.path.exists(self.logfile):
240 s = os.stat(self.logfile)
241 return (s[ST_CTIME], s[ST_MTIME], s[ST_SIZE])
242 return None
243
244 def poll(self):
245 if not self.started:
246 s = self.statFile()
247 if s == self.old_logfile_stats:
248 return # not started yet
249 if not s:
250 # the file was there, but now it's deleted. Forget about the
251 # initial state, clearly the process has deleted the logfile
252 # in preparation for creating a new one.
253 self.old_logfile_stats = None
254 return # no file to work with
255 self.f = open(self.logfile, "rb")
256 # if we only want new lines, seek to
257 # where we stat'd so we only find new
258 # lines
259 if self.follow:
260 self.f.seek(s[2], 0)
261 self.started = True
262 self.f.seek(self.f.tell(), 0)
263 while True:
264 data = self.f.read(10000)
265 if not data:
266 return
267 self.command.addLogfile(self.name, data)
268
269
270 class ShellCommand:
271 # This is a helper class, used by SlaveCommands to run programs in a
272 # child shell.
273
274 notreally = False
275 BACKUP_TIMEOUT = 5
276 KILL = "KILL"
277 CHUNK_LIMIT = 128*1024
278
279 # For sending elapsed time:
280 startTime = None
281 elapsedTime = None
282 # I wish we had easy access to CLOCK_MONOTONIC in Python:
283 # http://www.opengroup.org/onlinepubs/000095399/functions/clock_getres.html
284 # Then changes to the system clock during a run wouldn't affect the "elapsed
285 # time" results.
286
287 def __init__(self, builder, command,
288 workdir, environ=None,
289 sendStdout=True, sendStderr=True, sendRC=True,
290 timeout=None, maxTime=None, initialStdin=None,
291 keepStdinOpen=False, keepStdout=False, keepStderr=False,
292 logEnviron=True, logfiles={}, usePTY="slave-config"):
293 """
294
295 @param keepStdout: if True, we keep a copy of all the stdout text
296 that we've seen. This copy is available in
297 self.stdout, which can be read after the command
298 has finished.
299 @param keepStderr: same, for stderr
300
301 @param usePTY: "slave-config" -> use the SlaveBuilder's usePTY;
302 otherwise, true to use a PTY, false to not use a PTY.
303 """
304
305 self.builder = builder
306 self.command = Obfuscated.get_real(command)
307 self.fake_command = Obfuscated.get_fake(command)
308 self.sendStdout = sendStdout
309 self.sendStderr = sendStderr
310 self.sendRC = sendRC
311 self.logfiles = logfiles
312 self.workdir = workdir
313 if not os.path.exists(workdir):
314 os.makedirs(workdir)
315 self.environ = os.environ.copy()
316 if environ:
317 if environ.has_key('PYTHONPATH'):
318 ppath = environ['PYTHONPATH']
319 # Need to do os.pathsep translation. We could either do that
320 # by replacing all incoming ':'s with os.pathsep, or by
321 # accepting lists. I like lists better.
322 if not isinstance(ppath, str):
323 # If it's not a string, treat it as a sequence to be
324 # turned in to a string.
325 ppath = os.pathsep.join(ppath)
326
327 if self.environ.has_key('PYTHONPATH'):
328 # special case, prepend the builder's items to the
329 # existing ones. This will break if you send over empty
330 # strings, so don't do that.
331 ppath = ppath + os.pathsep + self.environ['PYTHONPATH']
332
333 environ['PYTHONPATH'] = ppath
334
335 self.environ.update(environ)
336 self.initialStdin = initialStdin
337 self.keepStdinOpen = keepStdinOpen
338 self.logEnviron = logEnviron
339 self.timeout = timeout
340 self.timer = None
341 self.maxTime = maxTime
342 self.maxTimer = None
343 self.keepStdout = keepStdout
344 self.keepStderr = keepStderr
345
346
347 if usePTY == "slave-config":
348 self.usePTY = self.builder.usePTY
349 else:
350 self.usePTY = usePTY
351
352 # usePTY=True is a convenience for cleaning up all children and
353 # grandchildren of a hung command. Fall back to usePTY=False on systems
354 # and in situations where ptys cause problems. PTYs are posix-only,
355 # and for .closeStdin to matter, we must use a pipe, not a PTY
356 if runtime.platformType != "posix" or initialStdin is not None:
357 if self.usePTY and usePTY != "slave-config":
358 self.sendStatus({'header': "WARNING: disabling usePTY for this command"})
359 self.usePTY = False
360
361 self.logFileWatchers = []
362 for name,filevalue in self.logfiles.items():
363 filename = filevalue
364 follow = False
365
366 # check for a dictionary of options
367 # filename is required, others are optional
368 if type(filevalue) == dict:
369 filename = filevalue['filename']
370 follow = filevalue.get('follow', False)
371
372 w = LogFileWatcher(self, name,
373 os.path.join(self.workdir, filename),
374 follow=follow)
375 self.logFileWatchers.append(w)
376
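# An illustrative sketch (not part of the original module) of the two
# 'logfiles' forms accepted above: a plain workdir-relative filename, or a
# dict with a required 'filename' and an optional 'follow' flag (only lines
# added after the watcher starts are sent). The names are hypothetical.
_example_logfiles = {
    "test.log": "_test/test.log",                  # watch the whole file
    "warnings": {"filename": "build/warnings.log",
                 "follow": True},                  # only newly appended lines
}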
377 def __repr__(self):
378 return "<slavecommand.ShellCommand '%s'>" % self.fake_command
379
380 def sendStatus(self, status):
381 self.builder.sendUpdate(status)
382
383 def start(self):
384 # return a Deferred which fires (with the exit code) when the command
385 # completes
386 if self.keepStdout:
387 self.stdout = ""
388 if self.keepStderr:
389 self.stderr = ""
390 self.deferred = defer.Deferred()
391 try:
392 self._startCommand()
393 except:
394 log.msg("error in ShellCommand._startCommand")
395 log.err()
396 # pretend it was a shell error
397 self.deferred.errback(AbandonChain(-1))
398 return self.deferred
399
400 def _startCommand(self):
401 # ensure workdir exists. Use os.path.normpath because this can be
402 # called with trailing '..' components, which can cause os.makedirs
403 # to fail.
404 workdir = os.path.normpath(self.workdir)
405 if not os.path.isdir(workdir):
406 os.makedirs(workdir)
407 log.msg("ShellCommand._startCommand")
408 if self.notreally:
409 self.sendStatus({'header': "command '%s' in dir %s" % \
410 (self.fake_command, self.workdir)})
411 self.sendStatus({'header': "(not really)\n"})
412 self.finished(None, 0)
413 return
414
415 self.pp = ShellCommandPP(self)
416
417 if type(self.command) in types.StringTypes:
418 if runtime.platformType == 'win32':
419 argv = os.environ['COMSPEC'].split() # allow %COMSPEC% to have args
420 if '/c' not in argv: argv += ['/c']
421 argv += [self.command]
422 else:
423 # for posix, use /bin/sh. for other non-posix, well, doesn't
424 # hurt to try
425 argv = ['/bin/sh', '-c', self.command]
426 display = self.fake_command
427 else:
428 if runtime.platformType == 'win32' and not self.command[0].lower().endswith(".exe"):
429 argv = os.environ['COMSPEC'].split() # allow %COMSPEC% to have args
430 if '/c' not in argv: argv += ['/c']
431 argv += list(self.command)
432 else:
433 argv = self.command
434 display = " ".join(self.fake_command)
435
436 # $PWD usually indicates the current directory; spawnProcess may not
437 # update this value, though, so we set it explicitly here. This causes
438 # weird problems (bug #456) on msys, though..
439 if not self.environ.get('MACHTYPE', None) == 'i686-pc-msys':
440 self.environ['PWD'] = os.path.abspath(self.workdir)
441
442 # self.stdin is handled in ShellCommandPP.connectionMade
443
444 # first header line is the command in plain text, argv joined with
445 # spaces. You should be able to cut-and-paste this into a shell to
446 # obtain the same results. If there are spaces in the arguments, too
447 # bad.
448 log.msg(" " + display)
449 self.sendStatus({'header': display+"\n"})
450
451 # then comes the secondary information
452 msg = " in dir %s" % (self.workdir,)
453 if self.timeout:
454 msg += " (timeout %d secs)" % (self.timeout,)
455 log.msg(" " + msg)
456 self.sendStatus({'header': msg+"\n"})
457
458 msg = " watching logfiles %s" % (self.logfiles,)
459 log.msg(" " + msg)
460 self.sendStatus({'header': msg+"\n"})
461
462 # then the obfuscated command array for resolving ambiguity
463 msg = " argv: %s" % (self.fake_command,)
464 log.msg(" " + msg)
465 self.sendStatus({'header': msg+"\n"})
466
467 # then the environment, since it sometimes causes problems
468 if self.logEnviron:
469 msg = " environment:\n"
470 env_names = self.environ.keys()
471 env_names.sort()
472 for name in env_names:
473 msg += " %s=%s\n" % (name, self.environ[name])
474 log.msg(" environment: %s" % (self.environ,))
475 self.sendStatus({'header': msg})
476
477 if self.initialStdin:
478 msg = " writing %d bytes to stdin" % len(self.initialStdin)
479 log.msg(" " + msg)
480 self.sendStatus({'header': msg+"\n"})
481
482 if self.keepStdinOpen:
483 msg = " leaving stdin open"
484 else:
485 msg = " closing stdin"
486 log.msg(" " + msg)
487 self.sendStatus({'header': msg+"\n"})
488
489 msg = " using PTY: %s" % bool(self.usePTY)
490 log.msg(" " + msg)
491 self.sendStatus({'header': msg+"\n"})
492
493 # this will be buffered until connectionMade is called
494 if self.initialStdin:
495 self.pp.writeStdin(self.initialStdin)
496 if not self.keepStdinOpen:
497 self.pp.closeStdin()
498
499 # win32eventreactor's spawnProcess (under twisted <= 2.0.1) returns
500 # None, as opposed to all the posixbase-derived reactors (which
501 # return the new Process object). This is a nuisance. We can make up
502 # for it by having the ProcessProtocol give us their .transport
503 # attribute after they get one. I'd prefer to get it from
504 # spawnProcess because I'm concerned about returning from this method
505 # without having a valid self.process to work with. (if kill() were
506 # called right after we return, but somehow before connectionMade
507 # were called, then kill() would blow up).
508 self.process = None
509 self.startTime = time.time()
510
511 p = reactor.spawnProcess(self.pp, argv[0], argv,
512 self.environ,
513 self.workdir,
514 usePTY=self.usePTY)
515 # connectionMade might have been called during spawnProcess
516 if not self.process:
517 self.process = p
518
519 # connectionMade also closes stdin as long as we're not using a PTY.
520 # This is intended to kill off inappropriately interactive commands
521 # better than the (long) hung-command timeout. ProcessPTY should be
522 # enhanced to allow the same childFDs argument that Process takes,
523 # which would let us connect stdin to /dev/null .
524
525 if self.timeout:
526 self.timer = reactor.callLater(self.timeout, self.doTimeout)
527
528 if self.maxTime:
529 self.maxTimer = reactor.callLater(self.maxTime, self.doMaxTimeout)
530
531 for w in self.logFileWatchers:
532 w.start()
533
534
535 def _chunkForSend(self, data):
536 # limit the chunks that we send over PB to 128k, since it has a
537 # hardwired string-size limit of 640k.
538 LIMIT = self.CHUNK_LIMIT
539 for i in range(0, len(data), LIMIT):
540 yield data[i:i+LIMIT]
541
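# A standalone sketch (not part of the original module) of what _chunkForSend
# guarantees: a payload bigger than CHUNK_LIMIT (128 KiB) is yielded in
# 128 KiB slices so no single PB message approaches the 640 KiB string limit.
def _example_chunk_sizes(data, limit=128 * 1024):
    return [len(data[i:i + limit]) for i in range(0, len(data), limit)]
assert _example_chunk_sizes("a" * (300 * 1024)) == [131072, 131072, 45056]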
542 def addStdout(self, data):
543 if self.sendStdout:
544 for chunk in self._chunkForSend(data):
545 self.sendStatus({'stdout': chunk})
546 if self.keepStdout:
547 self.stdout += data
548 if self.timer:
549 self.timer.reset(self.timeout)
550
551 def addStderr(self, data):
552 if self.sendStderr:
553 for chunk in self._chunkForSend(data):
554 self.sendStatus({'stderr': chunk})
555 if self.keepStderr:
556 self.stderr += data
557 if self.timer:
558 self.timer.reset(self.timeout)
559
560 def addLogfile(self, name, data):
561 for chunk in self._chunkForSend(data):
562 self.sendStatus({'log': (name, chunk)})
563 if self.timer:
564 self.timer.reset(self.timeout)
565
566 def finished(self, sig, rc):
567 self.elapsedTime = time.time() - self.startTime
568 log.msg("command finished with signal %s, exit code %s, elapsedTime: %0. 6f" % (sig,rc,self.elapsedTime))
569 for w in self.logFileWatchers:
570 # this will send the final updates
571 w.stop()
572 if sig is not None:
573 rc = -1
574 if self.sendRC:
575 if sig is not None:
576 self.sendStatus(
577 {'header': "process killed by signal %d\n" % sig})
578 self.sendStatus({'rc': rc})
579 self.sendStatus({'header': "elapsedTime=%0.6f\n" % self.elapsedTime})
580 if self.timer:
581 self.timer.cancel()
582 self.timer = None
583 if self.maxTimer:
584 self.maxTimer.cancel()
585 self.maxTimer = None
586 d = self.deferred
587 self.deferred = None
588 if d:
589 d.callback(rc)
590 else:
591 log.msg("Hey, command %s finished twice" % self)
592
593 def failed(self, why):
594 log.msg("ShellCommand.failed: command failed: %s" % (why,))
595 if self.timer:
596 self.timer.cancel()
597 self.timer = None
598 if self.maxTimer:
599 self.maxTimer.cancel()
600 self.maxTimer = None
601 d = self.deferred
602 self.deferred = None
603 if d:
604 d.errback(why)
605 else:
606 log.msg("Hey, command %s finished twice" % self)
607
608 def doTimeout(self):
609 self.timer = None
610 msg = "command timed out: %d seconds without output" % self.timeout
611 self.kill(msg)
612
613 def doMaxTimeout(self):
614 self.maxTimer = None
615 msg = "command timed out: %d seconds elapsed" % self.maxTime
616 self.kill(msg)
617
618 def kill(self, msg):
619 # This may be called by the timeout, or when the user has decided to
620 # abort this build.
621 if self.timer:
622 self.timer.cancel()
623 self.timer = None
624 if self.maxTimer:
625 self.maxTimer.cancel()
626 self.maxTimer = None
627 if hasattr(self.process, "pid") and self.process.pid is not None:
628 msg += ", killing pid %s" % self.process.pid
629 log.msg(msg)
630 self.sendStatus({'header': "\n" + msg + "\n"})
631
632 hit = 0
633 if runtime.platformType == "posix":
634 try:
635 # really want to kill off all child processes too. Process
636 # Groups are ideal for this, but that requires
637 # spawnProcess(usePTY=1). Try both ways in case process was
638 # not started that way.
639
640 # the test suite sets self.KILL=None to tell us we should
641 # only pretend to kill the child. This lets us test the
642 # backup timer.
643
644 sig = None
645 if self.KILL is not None:
646 sig = getattr(signal, "SIG"+ self.KILL, None)
647
648 if self.KILL == None:
649 log.msg("self.KILL==None, only pretending to kill child")
650 elif sig is None:
651 log.msg("signal module is missing SIG%s" % self.KILL)
652 elif not hasattr(os, "kill"):
653 log.msg("os module is missing the 'kill' function")
654 elif not hasattr(self.process, "pid") or self.process.pid is None:
655 log.msg("self.process has no pid")
656 else:
657 log.msg("trying os.kill(-pid, %d)" % (sig,))
658 # TODO: maybe use os.killpg instead of a negative pid?
659 os.kill(-self.process.pid, sig)
660 log.msg(" signal %s sent successfully" % sig)
661 hit = 1
662 except OSError:
663 # probably no-such-process, maybe because there is no process
664 # group
665 pass
666 if not hit:
667 try:
668 if self.KILL is None:
669 log.msg("self.KILL==None, only pretending to kill child")
670 else:
671 log.msg("trying process.signalProcess('KILL')")
672 self.process.signalProcess(self.KILL)
673 log.msg(" signal %s sent successfully" % (self.KILL,))
674 hit = 1
675 except OSError:
676 # could be no-such-process, because they finished very recently
677 pass
678 if not hit:
679 log.msg("signalProcess/os.kill failed both times")
680
681 if runtime.platformType == "posix":
682 # we only do this under posix because the win32eventreactor
683 # blocks here until the process has terminated, while closing
684 # stderr. This is weird.
685 self.pp.transport.loseConnection()
686
687 # finished ought to be called momentarily. Just in case it doesn't,
688 # set a timer which will abandon the command.
689 self.timer = reactor.callLater(self.BACKUP_TIMEOUT,
690 self.doBackupTimeout)
691
692 def doBackupTimeout(self):
693 log.msg("we tried to kill the process, and it wouldn't die.."
694 " finish anyway")
695 self.timer = None
696 self.sendStatus({'header': "SIGKILL failed to kill process\n"})
697 if self.sendRC:
698 self.sendStatus({'header': "using fake rc=-1\n"})
699 self.sendStatus({'rc': -1})
700 self.failed(TimeoutError("SIGKILL failed to kill process"))
701
702
703 def writeStdin(self, data):
704 self.pp.writeStdin(data)
705
706 def closeStdin(self):
707 self.pp.closeStdin()
708
709
710 class Command:
711 implements(ISlaveCommand)
712
713 """This class defines one command that can be invoked by the build master.
714 The command is executed on the slave side, and always sends back a
715 completion message when it finishes. It may also send intermediate status
716 as it runs (by calling builder.sendStatus). Some commands can be
717 interrupted (either by the build master or a local timeout), in which
718 case the step is expected to complete normally with a status message that
719 indicates an error occurred.
720
721 These commands are used by BuildSteps on the master side. Each kind of
722 BuildStep uses a single Command. The slave must implement all the
723 Commands required by the set of BuildSteps used for any given build:
724 this is checked at startup time.
725
726 All Commands are constructed with the same signature:
727 c = CommandClass(builder, args)
728 where 'builder' is the parent SlaveBuilder object, and 'args' is a
729 dict that is interpreted per-command.
730
731 The setup(args) method is available for setup, and is run from __init__.
732
733 The Command is started with start(). This method must be implemented in a
734 subclass, and it should return a Deferred. When your step is done, you
735 should fire the Deferred (the results are not used). If the command is
736 interrupted, it should fire the Deferred anyway.
737
738 While the command runs, it may send status messages back to the
739 buildmaster by calling self.sendStatus(statusdict). The statusdict is
740 interpreted by the master-side BuildStep however it likes.
741
742 A separate completion message is sent when the deferred fires, which
743 indicates that the Command has finished, but does not carry any status
744 data. If the Command needs to return an exit code of some sort, that
745 should be sent as a regular status message before the deferred is fired.
746 Once builder.commandComplete has been run, no more status messages may be
747 sent.
748
749 If interrupt() is called, the Command should attempt to shut down as
750 quickly as possible. Child processes should be killed, new ones should
751 not be started. The Command should send some kind of error status update,
752 then complete as usual by firing the Deferred.
753
754 .interrupted should be set by interrupt(), and can be tested to avoid
755 sending multiple error status messages.
756
757 If .running is False, the bot is shutting down (or has otherwise lost the
758 connection to the master), and should not send any status messages. This
759 is checked in Command.sendStatus .
760
761 """
762
763 # builder methods:
764 # sendStatus(dict) (zero or more)
765 # commandComplete() or commandInterrupted() (one, at end)
766
767 debug = False
768 interrupted = False
769 running = False # set by Builder, cleared on shutdown or when the
770 # Deferred fires
771
772 def __init__(self, builder, stepId, args):
773 self.builder = builder
774 self.stepId = stepId # just for logging
775 self.args = args
776 self.setup(args)
777
778 def setup(self, args):
779 """Override this in a subclass to extract items from the args dict."""
780 pass
781
782 def doStart(self):
783 self.running = True
784 d = defer.maybeDeferred(self.start)
785 d.addBoth(self.commandComplete)
786 return d
787
788 def start(self):
789 """Start the command. This method should return a Deferred that will
790 fire when the command has completed. The Deferred's argument will be
791 ignored.
792
793 This method should be overridden by subclasses."""
794 raise NotImplementedError, "You must implement this in a subclass"
795
796 def sendStatus(self, status):
797 """Send a status update to the master."""
798 if self.debug:
799 log.msg("sendStatus", status)
800 if not self.running:
801 log.msg("would sendStatus but not .running")
802 return
803 self.builder.sendUpdate(status)
804
805 def doInterrupt(self):
806 self.running = False
807 self.interrupt()
808
809 def interrupt(self):
810 """Override this in a subclass to allow commands to be interrupted.
811 May be called multiple times, test and set self.interrupted=True if
812 this matters."""
813 pass
814
815 def commandComplete(self, res):
816 self.running = False
817 return res
818
819 # utility methods, mostly used by SlaveShellCommand and the like
820
821 def _abandonOnFailure(self, rc):
822 if type(rc) is not int:
823 log.msg("weird, _abandonOnFailure was given rc=%s (%s)" % \
824 (rc, type(rc)))
825 assert isinstance(rc, int)
826 if rc != 0:
827 raise AbandonChain(rc)
828 return rc
829
830 def _sendRC(self, res):
831 self.sendStatus({'rc': 0})
832
833 def _checkAbandoned(self, why):
834 log.msg("_checkAbandoned", why)
835 why.trap(AbandonChain)
836 log.msg(" abandoning chain", why.value)
837 self.sendStatus({'rc': why.value.args[0]})
838 return None
839
840
841
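# A minimal sketch (not part of the original module) of the Command protocol
# documented above: setup() pulls values out of the args dict, start() returns
# a Deferred, intermediate results go through sendStatus(), and interrupt()
# sets .interrupted. The "sleep" command and its 'delay' argument are
# hypothetical, for illustration only.
class ExampleSleepCommand(Command):
    def setup(self, args):
        self.delay = args.get('delay', 1)
        self.timer = None

    def start(self):
        self.d = defer.Deferred()
        self.timer = reactor.callLater(self.delay, self._done)
        return self.d

    def interrupt(self):
        if self.interrupted:
            return
        self.interrupted = True
        self.timer.cancel()
        self._done()

    def _done(self):
        self.sendStatus({'rc': 1 if self.interrupted else 0})
        self.d.callback(0)

# registerSlaveCommand("sleep", ExampleSleepCommand, command_version)  # hypothetical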
842 class SlaveFileUploadCommand(Command):
843 """
844 Upload a file from slave to build master
845 Arguments:
846
847 - ['workdir']: base directory to use
848 - ['slavesrc']: name of the slave-side file to read from
849 - ['writer']: RemoteReference to a transfer._FileWriter object
850 - ['maxsize']: max size (in bytes) of file to write
851 - ['blocksize']: max size for each data block
852 """
853 debug = False
854
855 def setup(self, args):
856 self.workdir = args['workdir']
857 self.filename = args['slavesrc']
858 self.writer = args['writer']
859 self.remaining = args['maxsize']
860 self.blocksize = args['blocksize']
861 self.stderr = None
862 self.rc = 0
863
864 def start(self):
865 if self.debug:
866 log.msg('SlaveFileUploadCommand started')
867
868 # Open file
869 self.path = os.path.join(self.builder.basedir,
870 self.workdir,
871 os.path.expanduser(self.filename))
872 try:
873 self.fp = open(self.path, 'rb')
874 if self.debug:
875 log.msg('Opened %r for upload' % self.path)
876 except:
877 # TODO: this needs cleanup
878 self.fp = None
879 self.stderr = 'Cannot open file %r for upload' % self.path
880 self.rc = 1
881 if self.debug:
882 log.msg('Cannot open file %r for upload' % self.path)
883
884 self.sendStatus({'header': "sending %s" % self.path})
885
886 d = defer.Deferred()
887 reactor.callLater(0, self._loop, d)
888 def _close(res):
889 # close the file, but pass through any errors from _loop
890 d1 = self.writer.callRemote("close")
891 d1.addErrback(log.err)
892 d1.addCallback(lambda ignored: res)
893 return d1
894 d.addBoth(_close)
895 d.addBoth(self.finished)
896 return d
897
898 def _loop(self, fire_when_done):
899 d = defer.maybeDeferred(self._writeBlock)
900 def _done(finished):
901 if finished:
902 fire_when_done.callback(None)
903 else:
904 self._loop(fire_when_done)
905 def _err(why):
906 fire_when_done.errback(why)
907 d.addCallbacks(_done, _err)
908 return None
909
910 def _writeBlock(self):
911 """Write a block of data to the remote writer"""
912
913 if self.interrupted or self.fp is None:
914 if self.debug:
915 log.msg('SlaveFileUploadCommand._writeBlock(): end')
916 return True
917
918 length = self.blocksize
919 if self.remaining is not None and length > self.remaining:
920 length = self.remaining
921
922 if length <= 0:
923 if self.stderr is None:
924 self.stderr = 'Maximum filesize reached, truncating file %r' \
925 % self.path
926 self.rc = 1
927 data = ''
928 else:
929 data = self.fp.read(length)
930
931 if self.debug:
932 log.msg('SlaveFileUploadCommand._writeBlock(): '+
933 'allowed=%d readlen=%d' % (length, len(data)))
934 if len(data) == 0:
935 log.msg("EOF: callRemote(close)")
936 return True
937
938 if self.remaining is not None:
939 self.remaining = self.remaining - len(data)
940 assert self.remaining >= 0
941 d = self.writer.callRemote('write', data)
942 d.addCallback(lambda res: False)
943 return d
944
945 def interrupt(self):
946 if self.debug:
947 log.msg('interrupted')
948 if self.interrupted:
949 return
950 if self.stderr is None:
951 self.stderr = 'Upload of %r interrupted' % self.path
952 self.rc = 1
953 self.interrupted = True
954 # the next _writeBlock call will notice the .interrupted flag
955
956 def finished(self, res):
957 if self.debug:
958 log.msg('finished: stderr=%r, rc=%r' % (self.stderr, self.rc))
959 if self.stderr is None:
960 self.sendStatus({'rc': self.rc})
961 else:
962 self.sendStatus({'stderr': self.stderr, 'rc': self.rc})
963 return res
964
965 registerSlaveCommand("uploadFile", SlaveFileUploadCommand, command_version)
966
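# An illustrative sketch (not part of the original module) of the args dict a
# master sends for "uploadFile"; the paths are hypothetical, and 'writer' is a
# RemoteReference to a transfer._FileWriter that only exists at runtime
# (shown here as a placeholder).
_example_upload_args = {
    'workdir': 'build',
    'slavesrc': '_test/results.xml',     # slave-side file to read
    'writer': None,                      # RemoteReference, supplied by master
    'maxsize': 10 * 1024 * 1024,         # truncate (and set rc=1) past 10 MiB
    'blocksize': 16 * 1024,              # bytes per callRemote('write', ...)
}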
967
968 class SlaveDirectoryUploadCommand(SlaveFileUploadCommand):
969 """
970 Upload a directory from slave to build master
971 Arguments:
972
973 - ['workdir']: base directory to use
974 - ['slavesrc']: name of the slave-side directory to read from
975 - ['writer']: RemoteReference to a transfer._DirectoryWriter object
976 - ['maxsize']: max size (in bytes) of file to write
977 - ['blocksize']: max size for each data block
978 - ['compress']: one of [None, 'bz2', 'gz']
979 """
980 debug = True
981
982 def setup(self, args):
983 self.workdir = args['workdir']
984 self.dirname = args['slavesrc']
985 self.writer = args['writer']
986 self.remaining = args['maxsize']
987 self.blocksize = args['blocksize']
988 self.compress = args['compress']
989 self.stderr = None
990 self.rc = 0
991
992 def start(self):
993 if self.debug:
994 log.msg('SlaveDirectoryUploadCommand started')
995
996 self.path = os.path.join(self.builder.basedir,
997 self.workdir,
998 os.path.expanduser(self.dirname))
999 if self.debug:
1000 log.msg("path: %r" % self.path)
1001
1002 # Create temporary archive
1003 fd, self.tarname = tempfile.mkstemp()
1004 fileobj = os.fdopen(fd, 'w')
1005 if self.compress == 'bz2':
1006 mode='w|bz2'
1007 elif self.compress == 'gz':
1008 mode='w|gz'
1009 else:
1010 mode = 'w'
1011 archive = tarfile.open(name=self.tarname, mode=mode, fileobj=fileobj)
1012 archive.add(self.path, '')
1013 archive.close()
1014 fileobj.close()
1015
1016 # Transfer it
1017 self.fp = open(self.tarname, 'rb')
1018
1019 self.sendStatus({'header': "sending %s" % self.path})
1020
1021 d = defer.Deferred()
1022 reactor.callLater(0, self._loop, d)
1023 def unpack(res):
1024 # unpack the archive, but pass through any errors from _loop
1025 d1 = self.writer.callRemote("unpack")
1026 d1.addErrback(log.err)
1027 d1.addCallback(lambda ignored: res)
1028 return d1
1029 d.addCallback(unpack)
1030 d.addBoth(self.finished)
1031 return d
1032
1033 def finished(self, res):
1034 self.fp.close()
1035 os.remove(self.tarname)
1036 if self.debug:
1037 log.msg('finished: stderr=%r, rc=%r' % (self.stderr, self.rc))
1038 if self.stderr is None:
1039 self.sendStatus({'rc': self.rc})
1040 else:
1041 self.sendStatus({'stderr': self.stderr, 'rc': self.rc})
1042 return res
1043
1044 registerSlaveCommand("uploadDirectory", SlaveDirectoryUploadCommand, command_version)
1045
1046
1047 class SlaveFileDownloadCommand(Command):
1048 """
1049 Download a file from master to slave
1050 Arguments:
1051
1052 - ['workdir']: base directory to use
1053 - ['slavedest']: name of the slave-side file to be created
1054 - ['reader']: RemoteReference to a transfer._FileReader object
1055 - ['maxsize']: max size (in bytes) of file to write
1056 - ['blocksize']: max size for each data block
1057 - ['mode']: access mode for the new file
1058 """
1059 debug = False
1060
1061 def setup(self, args):
1062 self.workdir = args['workdir']
1063 self.filename = args['slavedest']
1064 self.reader = args['reader']
1065 self.bytes_remaining = args['maxsize']
1066 self.blocksize = args['blocksize']
1067 self.mode = args['mode']
1068 self.stderr = None
1069 self.rc = 0
1070
1071 def start(self):
1072 if self.debug:
1073 log.msg('SlaveFileDownloadCommand starting')
1074
1075 # Open file
1076 self.path = os.path.join(self.builder.basedir,
1077 self.workdir,
1078 os.path.expanduser(self.filename))
1079
1080 dirname = os.path.dirname(self.path)
1081 if not os.path.exists(dirname):
1082 os.makedirs(dirname)
1083
1084 try:
1085 self.fp = open(self.path, 'wb')
1086 if self.debug:
1087 log.msg('Opened %r for download' % self.path)
1088 if self.mode is not None:
1089 # note: there is a brief window during which the new file
1090 # will have the buildslave's default (umask) mode before we
1091 # set the new one. Don't use this mode= feature to keep files
1092 # private: use the buildslave's umask for that instead. (it
1093 # is possible to call os.umask() before and after the open()
1094 # call, but cleaning up from exceptions properly is more of a
1095 # nuisance that way).
1096 os.chmod(self.path, self.mode)
1097 except IOError:
1098 # TODO: this still needs cleanup
1099 self.fp = None
1100 self.stderr = 'Cannot open file %r for download' % self.path
1101 self.rc = 1
1102 if self.debug:
1103 log.msg('Cannot open file %r for download' % self.path)
1104
1105 d = defer.Deferred()
1106 reactor.callLater(0, self._loop, d)
1107 def _close(res):
1108 # close the file, but pass through any errors from _loop
1109 d1 = self.reader.callRemote('close')
1110 d1.addErrback(log.err)
1111 d1.addCallback(lambda ignored: res)
1112 return d1
1113 d.addBoth(_close)
1114 d.addBoth(self.finished)
1115 return d
1116
1117 def _loop(self, fire_when_done):
1118 d = defer.maybeDeferred(self._readBlock)
1119 def _done(finished):
1120 if finished:
1121 fire_when_done.callback(None)
1122 else:
1123 self._loop(fire_when_done)
1124 def _err(why):
1125 fire_when_done.errback(why)
1126 d.addCallbacks(_done, _err)
1127 return None
1128
1129 def _readBlock(self):
1130 """Read a block of data from the remote reader."""
1131
1132 if self.interrupted or self.fp is None:
1133 if self.debug:
1134 log.msg('SlaveFileDownloadCommand._readBlock(): end')
1135 return True
1136
1137 length = self.blocksize
1138 if self.bytes_remaining is not None and length > self.bytes_remaining:
1139 length = self.bytes_remaining
1140
1141 if length <= 0:
1142 if self.stderr is None:
1143 self.stderr = 'Maximum filesize reached, truncating file %r' \
1144 % self.path
1145 self.rc = 1
1146 return True
1147 else:
1148 d = self.reader.callRemote('read', length)
1149 d.addCallback(self._writeData)
1150 return d
1151
1152 def _writeData(self, data):
1153 if self.debug:
1154 log.msg('SlaveFileDownloadCommand._readBlock(): readlen=%d' %
1155 len(data))
1156 if len(data) == 0:
1157 return True
1158
1159 if self.bytes_remaining is not None:
1160 self.bytes_remaining = self.bytes_remaining - len(data)
1161 assert self.bytes_remaining >= 0
1162 self.fp.write(data)
1163 return False
1164
1165 def interrupt(self):
1166 if self.debug:
1167 log.msg('interrupted')
1168 if self.interrupted:
1169 return
1170 if self.stderr is None:
1171 self.stderr = 'Download of %r interrupted' % self.path
1172 self.rc = 1
1173 self.interrupted = True
1174 # now we wait for the next read request to return. _readBlock will
1175 # abandon the file when it sees self.interrupted set.
1176
1177 def finished(self, res):
1178 if self.fp is not None:
1179 self.fp.close()
1180
1181 if self.debug:
1182 log.msg('finished: stderr=%r, rc=%r' % (self.stderr, self.rc))
1183 if self.stderr is None:
1184 self.sendStatus({'rc': self.rc})
1185 else:
1186 self.sendStatus({'stderr': self.stderr, 'rc': self.rc})
1187 return res
1188
1189 registerSlaveCommand("downloadFile", SlaveFileDownloadCommand, command_version)
1190
1191
1192
1193 class SlaveShellCommand(Command):
1194 """This is a Command which runs a shell command. The args dict contains
1195 the following keys:
1196
1197 - ['command'] (required): a shell command to run. If this is a string,
1198 it will be run with /bin/sh (['/bin/sh',
1199 '-c', command]). If it is a list
1200 (preferred), it will be used directly.
1201 - ['workdir'] (required): subdirectory in which the command will be
1202 run, relative to the builder dir
1203 - ['env']: a dict of environment variables to augment/replace
1204 os.environ . PYTHONPATH is treated specially, and
1205 should be a list of path components to be prepended to
1206 any existing PYTHONPATH environment variable.
1207 - ['initial_stdin']: a string which will be written to the command's
1208 stdin as soon as it starts
1209 - ['keep_stdin_open']: unless True, the command's stdin will be
1210 closed as soon as initial_stdin has been
1211 written. Set this to True if you plan to write
1212 to stdin after the command has been started.
1213 - ['want_stdout']: 0 if stdout should be thrown away
1214 - ['want_stderr']: 0 if stderr should be thrown away
1215 - ['usePTY']: True or False if the command should use a PTY (defaults to
1216 configuration of the slave)
1217 - ['not_really']: 1 to skip execution and return rc=0
1218 - ['timeout']: seconds of silence to tolerate before killing command
1219 - ['maxTime']: seconds before killing command
1220 - ['logfiles']: dict mapping LogFile name to the workdir-relative
1221 filename of a local log file. This local file will be
1222 watched just like 'tail -f', and all changes will be
1223 written to 'log' status updates.
1224 - ['logEnviron']: False to not log the environment variables on the slave
1225
1226 ShellCommand creates the following status messages:
1227 - {'stdout': data} : when stdout data is available
1228 - {'stderr': data} : when stderr data is available
1229 - {'header': data} : when headers (command start/stop) are available
1230 - {'log': (logfile_name, data)} : when log files have new contents
1231 - {'rc': rc} : when the process has terminated
1232 """
1233
1234 def start(self):
1235 args = self.args
1236 # args['workdir'] is relative to Builder directory, and is required.
1237 assert args['workdir'] is not None
1238 workdir = os.path.join(self.builder.basedir, args['workdir'])
1239
1240 c = ShellCommand(self.builder, args['command'],
1241 workdir, environ=args.get('env'),
1242 timeout=args.get('timeout', None),
1243 maxTime=args.get('maxTime', None),
1244 sendStdout=args.get('want_stdout', True),
1245 sendStderr=args.get('want_stderr', True),
1246 sendRC=True,
1247 initialStdin=args.get('initial_stdin'),
1248 keepStdinOpen=args.get('keep_stdin_open'),
1249 logfiles=args.get('logfiles', {}),
1250 usePTY=args.get('usePTY', "slave-config"),
1251 logEnviron=args.get('logEnviron', True),
1252 )
1253 self.command = c
1254 d = self.command.start()
1255 return d
1256
1257 def interrupt(self):
1258 self.interrupted = True
1259 self.command.kill("command interrupted")
1260
1261 def writeStdin(self, data):
1262 self.command.writeStdin(data)
1263
1264 def closeStdin(self):
1265 self.command.closeStdin()
1266
1267 registerSlaveCommand("shell", SlaveShellCommand, command_version)
1268
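# An illustrative sketch (not part of the original module) of a typical args
# dict for the "shell" command documented above; the command, paths and
# values are hypothetical.
_example_shell_args = {
    'command': ['make', 'test'],            # list form is preferred
    'workdir': 'build',                     # relative to the builder dir
    'env': {'PYTHONPATH': ['pylibs']},      # prepended to existing PYTHONPATH
    'timeout': 600,                         # kill after 10 min of silence
    'logfiles': {'test.log': '_test/test.log'},
    'want_stdout': 1,
    'want_stderr': 1,
}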
1269
1270 class DummyCommand(Command):
1271 """
1272 I am a dummy no-op command that by default takes 5 seconds to complete.
1273 See L{buildbot.steps.dummy.RemoteDummy}
1274 """
1275
1276 def start(self):
1277 self.d = defer.Deferred()
1278 log.msg(" starting dummy command [%s]" % self.stepId)
1279 self.timer = reactor.callLater(1, self.doStatus)
1280 return self.d
1281
1282 def interrupt(self):
1283 if self.interrupted:
1284 return
1285 self.timer.cancel()
1286 self.timer = None
1287 self.interrupted = True
1288 self.finished()
1289
1290 def doStatus(self):
1291 log.msg(" sending intermediate status")
1292 self.sendStatus({'stdout': 'data'})
1293 timeout = self.args.get('timeout', 5) + 1
1294 self.timer = reactor.callLater(timeout - 1, self.finished)
1295
1296 def finished(self):
1297 log.msg(" dummy command finished [%s]" % self.stepId)
1298 if self.interrupted:
1299 self.sendStatus({'rc': 1})
1300 else:
1301 self.sendStatus({'rc': 0})
1302 self.d.callback(0)
1303
1304 registerSlaveCommand("dummy", DummyCommand, command_version)
1305
1306
1307 # this maps handle names to a callable. When the WaitCommand starts, this
1308 # callable is invoked with no arguments. It should return a Deferred. When
1309 # that Deferred fires, our WaitCommand will finish.
1310 waitCommandRegistry = {}
1311
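# A brief sketch (not part of the original module) of how the unit tests are
# expected to use this registry: install a callable under a handle before
# running a "dummy.wait" step whose args carry that handle. The handle name
# and deferred are hypothetical.
_example_finish_d = defer.Deferred()
waitCommandRegistry["example-handle"] = lambda: _example_finish_d
# ...the test later calls _example_finish_d.callback(None), which lets the
# WaitCommand below finish.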
1312 class WaitCommand(Command):
1313 """
1314 I am a dummy command used by the buildbot unit test suite. I wait for the
1315 unit test to tell us to finish. See L{buildbot.steps.dummy.Wait}
1316 """
1317
1318 def start(self):
1319 self.d = defer.Deferred()
1320 log.msg(" starting wait command [%s]" % self.stepId)
1321 handle = self.args['handle']
1322 cb = waitCommandRegistry[handle]
1323 del waitCommandRegistry[handle]
1324 def _called():
1325 log.msg(" wait-%s starting" % (handle,))
1326 d = cb()
1327 def _done(res):
1328 log.msg(" wait-%s finishing: %s" % (handle, res))
1329 return res
1330 d.addBoth(_done)
1331 d.addCallbacks(self.finished, self.failed)
1332 reactor.callLater(0, _called)
1333 return self.d
1334
1335 def interrupt(self):
1336 log.msg(" wait command interrupted")
1337 if self.interrupted:
1338 return
1339 self.interrupted = True
1340 self.finished("interrupted")
1341
1342 def finished(self, res):
1343 log.msg(" wait command finished [%s]" % self.stepId)
1344 if self.interrupted:
1345 self.sendStatus({'rc': 2})
1346 else:
1347 self.sendStatus({'rc': 0})
1348 self.d.callback(0)
1349 def failed(self, why):
1350 log.msg(" wait command failed [%s]" % self.stepId)
1351 self.sendStatus({'rc': 1})
1352 self.d.callback(0)
1353
1354 registerSlaveCommand("dummy.wait", WaitCommand, command_version)
1355
1356
1357 class SourceBase(Command):
1358 """Abstract base class for Version Control System operations (checkout
1359 and update). This class extracts the following arguments from the
1360 dictionary received from the master:
1361
1362 - ['workdir']: (required) the subdirectory where the buildable sources
1363 should be placed
1364
1365 - ['mode']: one of update/copy/clobber/export, defaults to 'update'
1366
1367 - ['revision']: If not None, this is an int or string which indicates
1368 which sources (along a time-like axis) should be used.
1369 It is the thing you provide as the CVS -r or -D
1370 argument.
1371
1372 - ['patch']: If not None, this is a tuple of (striplevel, patch)
1373 which contains a patch that should be applied after the
1374 checkout has occurred. Once applied, the tree is no
1375 longer eligible for use with mode='update', and it only
1376 makes sense to use this in conjunction with a
1377 ['revision'] argument. striplevel is an int, and patch
1378 is a string in standard unified diff format. The patch
1379 will be applied with 'patch -p%d <PATCH', with
1380 STRIPLEVEL substituted as %d. The command will fail if
1381 the patch process fails (rejected hunks).
1382
1383 - ['timeout']: seconds of silence tolerated before we kill off the
1384 command
1385
1386 - ['maxTime']: seconds before we kill off the command
1387
1388 - ['retry']: If not None, this is a tuple of (delay, repeats)
1389 which means that any failed VC updates should be
1390 reattempted, up to REPEATS times, after a delay of
1391 DELAY seconds. This is intended to deal with slaves
1392 that experience transient network failures.
1393 """
1394
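# An illustrative sketch (not part of the original module) of the argument
# shapes described in the docstring above; all values are hypothetical.
_example_source_args = {
    'workdir': 'build',
    'mode': 'update',                        # or copy / clobber / export
    'revision': '1234',
    'patch': (0, '--- a/foo\n+++ b/foo\n'),  # (striplevel, unified diff text)
    'timeout': 120,
    'retry': (10, 2),                        # wait 10 s, retry up to 2 more times
}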
1395 sourcedata = ""
1396
1397 def setup(self, args):
1398 # if we need to parse the output, use this environment. Otherwise
1399 # command output will be in whatever the buildslave's native language
1400 # has been set to.
1401 self.env = os.environ.copy()
1402 self.env['LC_MESSAGES'] = "C"
1403
1404 self.workdir = args['workdir']
1405 self.mode = args.get('mode', "update")
1406 self.revision = args.get('revision')
1407 self.patch = args.get('patch')
1408 self.timeout = args.get('timeout', 120)
1409 self.maxTime = args.get('maxTime', None)
1410 self.retry = args.get('retry')
1411 # VC-specific subclasses should override this to extract more args.
1412 # Make sure to upcall!
1413
1414 def start(self):
1415 self.sendStatus({'header': "starting " + self.header + "\n"})
1416 self.command = None
1417
1418 # self.srcdir is where the VC system should put the sources
1419 if self.mode == "copy":
1420 self.srcdir = "source" # hardwired directory name, sorry
1421 else:
1422 self.srcdir = self.workdir
1423 self.sourcedatafile = os.path.join(self.builder.basedir,
1424 self.srcdir,
1425 ".buildbot-sourcedata")
1426
1427 d = defer.succeed(None)
1428 self.maybeClobber(d)
1429 if not (self.sourcedirIsUpdateable() and self.sourcedataMatches()):
1430 # the directory cannot be updated, so we have to clobber it.
1431 # Perhaps the master just changed modes from 'export' to
1432 # 'update'.
1433 d.addCallback(self.doClobber, self.srcdir)
1434
1435 d.addCallback(self.doVC)
1436
1437 if self.mode == "copy":
1438 d.addCallback(self.doCopy)
1439 if self.patch:
1440 d.addCallback(self.doPatch)
1441 d.addCallbacks(self._sendRC, self._checkAbandoned)
1442 return d
1443
1444 def maybeClobber(self, d):
1445 # do we need to clobber anything?
1446 if self.mode in ("copy", "clobber", "export"):
1447 d.addCallback(self.doClobber, self.workdir)
1448
1449 def interrupt(self):
1450 self.interrupted = True
1451 if self.command:
1452 self.command.kill("command interrupted")
1453
1454 def doVC(self, res):
1455 if self.interrupted:
1456 raise AbandonChain(1)
1457 if self.sourcedirIsUpdateable() and self.sourcedataMatches():
1458 d = self.doVCUpdate()
1459 d.addCallback(self.maybeDoVCFallback)
1460 else:
1461 d = self.doVCFull()
1462 d.addBoth(self.maybeDoVCRetry)
1463 d.addCallback(self._abandonOnFailure)
1464 d.addCallback(self._handleGotRevision)
1465 d.addCallback(self.writeSourcedata)
1466 return d
1467
1468 def sourcedataMatches(self):
1469 try:
1470 olddata = self.readSourcedata()
1471 if olddata != self.sourcedata:
1472 return False
1473 except IOError:
1474 return False
1475 return True
1476
1477 def sourcedirIsPatched(self):
1478 return os.path.exists(os.path.join(self.builder.basedir,
1479 self.workdir,
1480 ".buildbot-patched"))
1481
1482 def _handleGotRevision(self, res):
1483 d = defer.maybeDeferred(self.parseGotRevision)
1484 d.addCallback(lambda got_revision:
1485 self.sendStatus({'got_revision': got_revision}))
1486 return d
1487
1488 def parseGotRevision(self):
1489 """Override this in a subclass. It should return a string that
1490 represents which revision was actually checked out, or a Deferred
1491 that will fire with such a string. If, in a future build, you were to
1492 pass this 'got_revision' string in as the 'revision' component of a
1493 SourceStamp, you should wind up with the same source code as this
1494 checkout just obtained.
1495
1496 It is probably most useful to scan self.command.stdout for a string
1497 of some sort. Be sure to set keepStdout=True on the VC command that
1498 you run, so that you'll have something available to look at.
1499
1500 If this information is unavailable, just return None."""
1501
1502 return None
1503
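# A minimal sketch (not part of the original module) of the kind of override
# described above, written here as a free function for a hypothetical VC
# subclass whose command prints a line like "Revision: 1234". keepStdout=True
# must be set on that ShellCommand so its .stdout attribute is available.
def _example_parse_got_revision(vc_command_stdout):
    m = re.search(r"Revision:\s*(\S+)", vc_command_stdout)
    if m:
        return m.group(1)
    return None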
1504 def readSourcedata(self):
1505 return open(self.sourcedatafile, "r").read()
1506
1507 def writeSourcedata(self, res):
1508 open(self.sourcedatafile, "w").write(self.sourcedata)
1509 return res
1510
1511 def sourcedirIsUpdateable(self):
1512 """Returns True if the tree can be updated."""
1513 raise NotImplementedError("this must be implemented in a subclass")
1514
1515 def doVCUpdate(self):
1516 """Returns a deferred with the steps to update a checkout."""
1517 raise NotImplementedError("this must be implemented in a subclass")
1518
1519 def doVCFull(self):
1520 """Returns a deferred with the steps to do a fresh checkout."""
1521 raise NotImplementedError("this must be implemented in a subclass")
1522
1523 def maybeDoVCFallback(self, rc):
1524 if type(rc) is int and rc == 0:
1525 return rc
1526 if self.interrupted:
1527 raise AbandonChain(1)
1528 msg = "update failed, clobbering and trying again"
1529 self.sendStatus({'header': msg + "\n"})
1530 log.msg(msg)
1531 d = self.doClobber(None, self.srcdir)
1532 d.addCallback(self.doVCFallback2)
1533 return d
1534
1535 def doVCFallback2(self, res):
1536 msg = "now retrying VC operation"
1537 self.sendStatus({'header': msg + "\n"})
1538 log.msg(msg)
1539 d = self.doVCFull()
1540 d.addBoth(self.maybeDoVCRetry)
1541 d.addCallback(self._abandonOnFailure)
1542 return d
1543
1544 def maybeDoVCRetry(self, res):
1545 """We get here somewhere after a VC chain has finished. res could
1546 be::
1547
1548 - 0: the operation was successful
1549 - nonzero: the operation failed. retry if possible
1550 - AbandonChain: the operation failed, someone else noticed. retry.
1551 - Failure: some other exception, re-raise
1552 """
1553
1554 if isinstance(res, failure.Failure):
1555 if self.interrupted:
1556 return res # don't re-try interrupted builds
1557 res.trap(AbandonChain)
1558 else:
1559 if type(res) is int and res == 0:
1560 return res
1561 if self.interrupted:
1562 raise AbandonChain(1)
1563 # if we get here, we should retry, if possible
1564 if self.retry:
1565 delay, repeats = self.retry
1566 if repeats >= 0:
1567 self.retry = (delay, repeats-1)
1568 msg = ("update failed, trying %d more times after %d seconds"
1569 % (repeats, delay))
1570 self.sendStatus({'header': msg + "\n"})
1571 log.msg(msg)
1572 d = defer.Deferred()
1573 self.maybeClobber(d)
1574 d.addCallback(lambda res: self.doVCFull())
1575 d.addBoth(self.maybeDoVCRetry)
1576 reactor.callLater(delay, d.callback, None)
1577 return d
1578 return res
1579
1580 def doClobber(self, dummy, dirname, chmodDone=False):
1581 # TODO: remove the old tree in the background
1582 ## workdir = os.path.join(self.builder.basedir, self.workdir)
1583 ## deaddir = self.workdir + ".deleting"
1584 ## if os.path.isdir(workdir):
1585 ## try:
1586 ## os.rename(workdir, deaddir)
1587 ## # might fail if deaddir already exists: previous deletion
1588 ## # hasn't finished yet
1589 ## # start the deletion in the background
1590 ## # TODO: there was a solaris/NetApp/NFS problem where a
1591 ## # process that was still running out of the directory we're
1592 ## # trying to delete could prevent the rm-rf from working. I
1593 ## # think it stalled the rm, but maybe it just died with
1594 ## # permission issues. Try to detect this.
1595 ## os.commands("rm -rf %s &" % deaddir)
1596 ## except:
1597 ## # fall back to sequential delete-then-checkout
1598 ## pass
1599 d = os.path.join(self.builder.basedir, dirname)
1600 if runtime.platformType != "posix":
1601 # if we're running on w32, use rmtree instead. It will block,
1602 # but hopefully it won't take too long.
1603 rmdirRecursive(d)
1604 return defer.succeed(0)
1605 command = ["rm", "-rf", d]
1606 c = ShellCommand(self.builder, command, self.builder.basedir,
1607 sendRC=0, timeout=self.timeout, maxTime=self.maxTime,
1608 usePTY=False)
1609
1610 self.command = c
1611 # sendRC=0 means the rm command will send stdout/stderr to the
1612 # master, but not the rc=0 when it finishes. That job is left to
1613 # _sendRC
1614 d = c.start()
1615 # The rm -rf may fail if there is a left-over subdir with chmod 000
1616 # permissions. So if we get a failure, we attempt to chmod suitable
1617 # permissions and re-try the rm -rf.
1618 if chmodDone:
1619 d.addCallback(self._abandonOnFailure)
1620 else:
1621 d.addCallback(lambda rc: self.doClobberTryChmodIfFail(rc, dirname))
1622 return d
1623
1624 def doClobberTryChmodIfFail(self, rc, dirname):
1625 assert isinstance(rc, int)
1626 if rc == 0:
1627 return defer.succeed(0)
1628 # Attempt a recursive chmod and re-try the rm -rf after.
1629 command = ["chmod", "-R", "u+rwx", os.path.join(self.builder.basedir, di rname)]
1630 c = ShellCommand(self.builder, command, self.builder.basedir,
1631 sendRC=0, timeout=self.timeout, maxTime=self.maxTime,
1632 usePTY=False)
1633
1634 self.command = c
1635 d = c.start()
1636 d.addCallback(self._abandonOnFailure)
1637 d.addCallback(lambda dummy: self.doClobber(dummy, dirname, True))
1638 return d
1639
1640 def doCopy(self, res):
1641 # now copy tree to workdir
1642 fromdir = os.path.join(self.builder.basedir, self.srcdir)
1643 todir = os.path.join(self.builder.basedir, self.workdir)
1644 if runtime.platformType != "posix":
1645 self.sendStatus({'header': "Since we're on a non-POSIX platform, "
1646 "we're not going to try to execute cp in a subprocess, but instead "
1647 "use shutil.copytree(), which will block until it is complete. "
1648 "fromdir: %s, todir: %s\n" % (fromdir, todir)})
1649 shutil.copytree(fromdir, todir)
1650 return defer.succeed(0)
1651
1652 if not os.path.exists(os.path.dirname(todir)):
1653 os.makedirs(os.path.dirname(todir))
1654 if os.path.exists(todir):
1655 # I don't think this happens, but just in case..
1656 log.msg("cp target '%s' already exists -- cp will not do what you th ink!" % todir)
1657
1658 command = ['cp', '-R', '-P', '-p', fromdir, todir]
1659 c = ShellCommand(self.builder, command, self.builder.basedir,
1660 sendRC=False, timeout=self.timeout, maxTime=self.maxTime,
1661 usePTY=False)
1662 self.command = c
1663 d = c.start()
1664 d.addCallback(self._abandonOnFailure)
1665 return d
1666
1667 def doPatch(self, res):
1668 patchlevel = self.patch[0]
1669 diff = self.patch[1]
1670 root = None
1671 if len(self.patch) >= 3:
1672 root = self.patch[2]
1673 command = [
1674 getCommand("patch"),
1675 '-p%d' % patchlevel,
1676 '--remove-empty-files',
1677 '--force',
1678 '--forward',
1679 ]
1680 dir = os.path.join(self.builder.basedir, self.workdir)
1681 # Mark the directory so we don't try to update it later, or at least try
1682 # to revert first.
1683 marker = open(os.path.join(dir, ".buildbot-patched"), "w")
1684 marker.write("patched\n")
1685 marker.close()
1686
1687 # Update 'dir' with the 'root' option. Make sure it is a subdirectory
1688 # of dir.
1689 if (root and
1690 os.path.abspath(os.path.join(dir, root)
1691 ).startswith(os.path.abspath(dir))):
1692 dir = os.path.join(dir, root)
1693
1694 # now apply the patch
1695 c = ShellCommand(self.builder, command, dir,
1696 sendRC=False, timeout=self.timeout,
1697 maxTime=self.maxTime, initialStdin=diff, usePTY=False)
1698 self.command = c
1699 d = c.start()
1700 d.addCallback(self._abandonOnFailure)
1701 return d
1702
1703
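# A minimal standalone sketch of what SourceBase.doPatch() above does, assuming a
# POSIX 'patch' binary on PATH; the helper name and the use of subprocess are
# illustrative only and are not part of buildbot itself.
def _example_apply_patch(workdir, patchlevel, diff, root=None):
    import os, subprocess
    target = workdir
    if root:
        candidate = os.path.abspath(os.path.join(workdir, root))
        if candidate.startswith(os.path.abspath(workdir)):
            target = candidate  # only honour 'root' when it stays inside workdir
    cmd = ["patch", "-p%d" % patchlevel,
           "--remove-empty-files", "--force", "--forward"]
    # The diff body is fed on stdin, mirroring initialStdin=diff above.
    proc = subprocess.Popen(cmd, cwd=target, stdin=subprocess.PIPE)
    proc.communicate(diff)
    return proc.returncode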
1704 class CVS(SourceBase):
1705 """CVS-specific VC operation. In addition to the arguments handled by
1706 SourceBase, this command reads the following keys:
1707
1708 ['cvsroot'] (required): the CVSROOT repository string
1709 ['cvsmodule'] (required): the module to be retrieved
1710 ['branch']: a '-r' tag or branch name to use for the checkout/update
1711 ['login']: a string for use as a password to 'cvs login'
1712 ['global_options']: a list of strings to use before the CVS verb
1713 ['checkout_options']: a list of strings to use after checkout,
1714 but before revision and branch specifiers
1715 """
1716
1717 header = "cvs operation"
1718
1719 def setup(self, args):
1720 SourceBase.setup(self, args)
1721 self.vcexe = getCommand("cvs")
1722 self.cvsroot = args['cvsroot']
1723 self.cvsmodule = args['cvsmodule']
1724 self.global_options = args.get('global_options', [])
1725 self.checkout_options = args.get('checkout_options', [])
1726 self.branch = args.get('branch')
1727 self.login = args.get('login')
1728 self.sourcedata = "%s\n%s\n%s\n" % (self.cvsroot, self.cvsmodule,
1729 self.branch)
1730
1731 def sourcedirIsUpdateable(self):
1732 return (not self.sourcedirIsPatched() and
1733 os.path.isdir(os.path.join(self.builder.basedir,
1734 self.srcdir, "CVS")))
1735
1736 def start(self):
1737 if self.login is not None:
1738 # need to do a 'cvs login' command first
1739 d = self.builder.basedir
1740 command = ([self.vcexe, '-d', self.cvsroot] + self.global_options
1741 + ['login'])
1742 c = ShellCommand(self.builder, command, d,
1743 sendRC=False, timeout=self.timeout,
1744 maxTime=self.maxTime,
1745 initialStdin=self.login+"\n", usePTY=False)
1746 self.command = c
1747 d = c.start()
1748 d.addCallback(self._abandonOnFailure)
1749 d.addCallback(self._didLogin)
1750 return d
1751 else:
1752 return self._didLogin(None)
1753
1754 def _didLogin(self, res):
1755 # now we really start
1756 return SourceBase.start(self)
1757
1758 def doVCUpdate(self):
1759 d = os.path.join(self.builder.basedir, self.srcdir)
1760 command = [self.vcexe, '-z3'] + self.global_options + ['update', '-dP']
1761 if self.branch:
1762 command += ['-r', self.branch]
1763 if self.revision:
1764 command += ['-D', self.revision]
1765 c = ShellCommand(self.builder, command, d,
1766 sendRC=False, timeout=self.timeout,
1767 maxTime=self.maxTime, usePTY=False)
1768 self.command = c
1769 return c.start()
1770
1771 def doVCFull(self):
1772 d = self.builder.basedir
1773 if self.mode == "export":
1774 verb = "export"
1775 else:
1776 verb = "checkout"
1777 command = ([self.vcexe, '-d', self.cvsroot, '-z3'] +
1778 self.global_options +
1779 [verb, '-d', self.srcdir])
1780
1781 if verb == "checkout":
1782 command += self.checkout_options
1783 if self.branch:
1784 command += ['-r', self.branch]
1785 if self.revision:
1786 command += ['-D', self.revision]
1787 command += [self.cvsmodule]
1788
1789 c = ShellCommand(self.builder, command, d,
1790 sendRC=False, timeout=self.timeout,
1791 maxTime=self.maxTime, usePTY=False)
1792 self.command = c
1793 return c.start()
1794
1795 def parseGotRevision(self):
1796 # CVS does not have any kind of revision stamp to speak of. We return
1797 # the current timestamp as a best-effort guess, but this depends upon
1798 # the local system having a clock that is
1799 # reasonably-well-synchronized with the repository.
1800 return time.strftime("%Y-%m-%d %H:%M:%S +0000", time.gmtime())
1801
1802 registerSlaveCommand("cvs", CVS, command_version)
1803
1804 class SVN(SourceBase):
1805 """Subversion-specific VC operation. In addition to the arguments
1806 handled by SourceBase, this command reads the following keys:
1807
1808 ['svnurl'] (required): the SVN repository string
1809 ['username']: Username passed to the svn command
1810 ['password']: Password passed to the svn command
1811 ['keep_on_purge']: Files and directories to keep between updates
1812 ['ignore_ignores']: Ignore ignores when purging changes
1813 ['always_purge']: Always purge local changes after each build
1814 ['depth']: Pass depth argument to subversion 1.5+
1815 """
1816
1817 header = "svn operation"
1818
1819 def setup(self, args):
1820 SourceBase.setup(self, args)
1821 self.vcexe = getCommand("svn")
1822 self.svnurl = args['svnurl']
1823 self.sourcedata = "%s\n" % self.svnurl
1824 self.keep_on_purge = args.get('keep_on_purge', [])
1825 self.keep_on_purge.append(".buildbot-sourcedata")
1826 self.ignore_ignores = args.get('ignore_ignores', True)
1827 self.always_purge = args.get('always_purge', False)
1828
1829 self.svn_args = []
1830 if args.has_key('username'):
1831 self.svn_args.extend(["--username", args['username']])
1832 if args.has_key('password'):
1833 self.svn_args.extend(["--password", Obfuscated(args['password'], "XX XX")])
1834 if args.get('extra_args', None) is not None:
1835 self.svn_args.extend(args['extra_args'])
1836
1837 if args.has_key('depth'):
1838 self.svn_args.extend(["--depth",args['depth']])
1839
1840 def _dovccmd(self, command, args, rootdir=None, cb=None, **kwargs):
1841 if rootdir is None:
1842 rootdir = os.path.join(self.builder.basedir, self.srcdir)
1843 fullCmd = [self.vcexe, command, '--non-interactive', '--no-auth-cache']
1844 fullCmd.extend(self.svn_args)
1845 fullCmd.extend(args)
1846 c = ShellCommand(self.builder, fullCmd, rootdir,
1847 environ=self.env, sendRC=False, timeout=self.timeout,
1848 maxTime=self.maxTime, usePTY=False, **kwargs)
1849 self.command = c
1850 d = c.start()
1851 if cb:
1852 d.addCallback(self._abandonOnFailure)
1853 d.addCallback(cb)
1854 return d
1855
1856 def sourcedirIsUpdateable(self):
1857 return os.path.isdir(os.path.join(self.builder.basedir,
1858 self.srcdir, ".svn"))
1859
1860 def doVCUpdate(self):
1861 if self.sourcedirIsPatched() or self.always_purge:
1862 return self._purgeAndUpdate()
1863 revision = self.args['revision'] or 'HEAD'
1864 # update: possible for mode in ('copy', 'update')
1865 return self._dovccmd('update', ['--revision', str(revision)],
1866 keepStdout=True)
1867
1868 def doVCFull(self):
1869 revision = self.args['revision'] or 'HEAD'
1870 args = ['--revision', str(revision), self.svnurl, self.srcdir]
1871 if self.mode == "export":
1872 command = 'export'
1873 else:
1874 # mode=='clobber', or copy/update on a broken workspace
1875 command = 'checkout'
1876 return self._dovccmd(command, args, rootdir=self.builder.basedir,
1877 keepStdout=True)
1878
1879 def _purgeAndUpdate(self):
1880 """svn revert has several corner cases that make it unpractical.
1881
1882 Use the Force instead and delete everything that shows up in status."""
1883 args = ['--xml']
1884 if self.ignore_ignores:
1885 args.append('--no-ignore')
1886 return self._dovccmd('status', args, keepStdout=True, sendStdout=False,
1887 cb=self._purgeAndUpdate2)
1888
1889 def _purgeAndUpdate2(self, res):
1890 """Delete everything that shown up on status."""
1891 result_xml = parseString(self.command.stdout)
1892 for entry in result_xml.getElementsByTagName('entry'):
1893 filename = entry.getAttribute('path')
1894 if filename in self.keep_on_purge:
1895 continue
1896 filepath = os.path.join(self.builder.basedir, self.workdir,
1897 filename)
1898 self.sendStatus({'stdout': "%s\n" % filepath})
1899 if os.path.isfile(filepath):
1900 os.chmod(filepath, 0700)
1901 os.remove(filepath)
1902 else:
1903 rmdirRecursive(filepath)
1904 # Now safe to update.
1905 revision = self.args['revision'] or 'HEAD'
1906 return self._dovccmd('update', ['--revision', str(revision)],
1907 keepStdout=True)
1908
1909 def getSvnVersionCommand(self):
1910 """
1911 Get the (shell) command used to determine SVN revision number
1912 of checked-out code
1913
1914 return: list of strings, passable as the command argument to ShellCommand
1915 """
1916 # svn checkout operations finish with 'Checked out revision 16657.'
1917 # svn update operations finish with the line 'At revision 16654.'
1918 # But we don't use those. Instead, run 'svnversion'.
1919 svnversion_command = getCommand("svnversion")
1920 # older versions of 'svnversion' (1.1.4) require the WC_PATH
1921 # argument, newer ones (1.3.1) do not.
1922 return [svnversion_command, "."]
1923
1924 def parseGotRevision(self):
1925 c = ShellCommand(self.builder,
1926 self.getSvnVersionCommand(),
1927 os.path.join(self.builder.basedir, self.srcdir),
1928 environ=self.env,
1929 sendStdout=False, sendStderr=False, sendRC=False,
1930 keepStdout=True, usePTY=False)
1931 d = c.start()
1932 def _parse(res):
1933 r_raw = c.stdout.strip()
1934 # Extract revision from the version "number" string
1935 r = r_raw.rstrip('MS')
1936 r = r.split(':')[-1]
1937 got_version = None
1938 try:
1939 got_version = int(r)
1940 except ValueError:
1941 msg =("SVN.parseGotRevision unable to parse output "
1942 "of svnversion: '%s'" % r_raw)
1943 log.msg(msg)
1944 self.sendStatus({'header': msg + "\n"})
1945 return got_version
1946 d.addCallback(_parse)
1947 return d
1948
1949
1950 registerSlaveCommand("svn", SVN, command_version)
1951
1952 class Darcs(SourceBase):
1953 """Darcs-specific VC operation. In addition to the arguments
1954 handled by SourceBase, this command reads the following keys:
1955
1956 ['repourl'] (required): the Darcs repository string
1957 """
1958
1959 header = "darcs operation"
1960
1961 def setup(self, args):
1962 SourceBase.setup(self, args)
1963 self.vcexe = getCommand("darcs")
1964 self.repourl = args['repourl']
1965 self.sourcedata = "%s\n" % self.repourl
1966 self.revision = self.args.get('revision')
1967
1968 def sourcedirIsUpdateable(self):
1969 # checking out a specific revision requires a full 'darcs get'
1970 return (not self.revision and
1971 not self.sourcedirIsPatched() and
1972 os.path.isdir(os.path.join(self.builder.basedir,
1973 self.srcdir, "_darcs")))
1974
1975 def doVCUpdate(self):
1976 assert not self.revision
1977 # update: possible for mode in ('copy', 'update')
1978 d = os.path.join(self.builder.basedir, self.srcdir)
1979 command = [self.vcexe, 'pull', '--all', '--verbose']
1980 c = ShellCommand(self.builder, command, d,
1981 sendRC=False, timeout=self.timeout,
1982 maxTime=self.maxTime, usePTY=False)
1983 self.command = c
1984 return c.start()
1985
1986 def doVCFull(self):
1987 # checkout or export
1988 d = self.builder.basedir
1989 command = [self.vcexe, 'get', '--verbose', '--partial',
1990 '--repo-name', self.srcdir]
1991 if self.revision:
1992 # write the context to a file
1993 n = os.path.join(self.builder.basedir, ".darcs-context")
1994 f = open(n, "wb")
1995 f.write(self.revision)
1996 f.close()
1997 # tell Darcs to use that context
1998 command.append('--context')
1999 command.append(n)
2000 command.append(self.repourl)
2001
2002 c = ShellCommand(self.builder, command, d,
2003 sendRC=False, timeout=self.timeout,
2004 maxTime=self.maxTime, usePTY=False)
2005 self.command = c
2006 d = c.start()
2007 if self.revision:
2008 d.addCallback(self.removeContextFile, n)
2009 return d
2010
2011 def removeContextFile(self, res, n):
2012 os.unlink(n)
2013 return res
2014
2015 def parseGotRevision(self):
2016 # we use 'darcs context' to find out what we wound up with
2017 command = [self.vcexe, "changes", "--context"]
2018 c = ShellCommand(self.builder, command,
2019 os.path.join(self.builder.basedir, self.srcdir),
2020 environ=self.env,
2021 sendStdout=False, sendStderr=False, sendRC=False,
2022 keepStdout=True, usePTY=False)
2023 d = c.start()
2024 d.addCallback(lambda res: c.stdout)
2025 return d
2026
2027 registerSlaveCommand("darcs", Darcs, command_version)
2028
2029 class Monotone(SourceBase):
2030 """Monotone-specific VC operation. In addition to the arguments handled
2031 by SourceBase, this command reads the following keys:
2032
2033 ['server_addr'] (required): the address of the server to pull from
2034 ['branch'] (required): the branch the revision is on
2035 ['db_path'] (required): the local database path to use
2036 ['revision'] (required): the revision to check out
2037 ['monotone']: (required): path to monotone executable
2038 """
2039
2040 header = "monotone operation"
2041
2042 def setup(self, args):
2043 SourceBase.setup(self, args)
2044 self.server_addr = args["server_addr"]
2045 self.branch = args["branch"]
2046 self.db_path = args["db_path"]
2047 self.revision = args["revision"]
2048 self.monotone = args["monotone"]
2049 self._made_fulls = False
2050 self._pull_timeout = args["timeout"]
2051
2052 def _makefulls(self):
2053 if not self._made_fulls:
2054 basedir = self.builder.basedir
2055 self.full_db_path = os.path.join(basedir, self.db_path)
2056 self.full_srcdir = os.path.join(basedir, self.srcdir)
2057 self._made_fulls = True
2058
2059 def sourcedirIsUpdateable(self):
2060 self._makefulls()
2061 return (not self.sourcedirIsPatched() and
2062 os.path.isfile(self.full_db_path) and
2063 os.path.isdir(os.path.join(self.full_srcdir, "MT")))
2064
2065 def doVCUpdate(self):
2066 return self._withFreshDb(self._doUpdate)
2067
2068 def _doUpdate(self):
2069 # update: possible for mode in ('copy', 'update')
2070 command = [self.monotone, "update",
2071 "-r", self.revision,
2072 "-b", self.branch]
2073 c = ShellCommand(self.builder, command, self.full_srcdir,
2074 sendRC=False, timeout=self.timeout,
2075 maxTime=self.maxTime, usePTY=False)
2076 self.command = c
2077 return c.start()
2078
2079 def doVCFull(self):
2080 return self._withFreshDb(self._doFull)
2081
2082 def _doFull(self):
2083 command = [self.monotone, "--db=" + self.full_db_path,
2084 "checkout",
2085 "-r", self.revision,
2086 "-b", self.branch,
2087 self.full_srcdir]
2088 c = ShellCommand(self.builder, command, self.builder.basedir,
2089 sendRC=False, timeout=self.timeout,
2090 maxTime=self.maxTime, usePTY=False)
2091 self.command = c
2092 return c.start()
2093
2094 def _withFreshDb(self, callback):
2095 self._makefulls()
2096 # first ensure the db exists and is usable
2097 if os.path.isfile(self.full_db_path):
2098 # already exists, so run 'db migrate' in case monotone has been
2099 # upgraded under us
2100 command = [self.monotone, "db", "migrate",
2101 "--db=" + self.full_db_path]
2102 else:
2103 # We'll be doing an initial pull, so up the timeout to 3 hours to
2104 # make sure it will have time to complete.
2105 self._pull_timeout = max(self._pull_timeout, 3 * 60 * 60)
2106 self.sendStatus({"header": "creating database %s\n"
2107 % (self.full_db_path,)})
2108 command = [self.monotone, "db", "init",
2109 "--db=" + self.full_db_path]
2110 c = ShellCommand(self.builder, command, self.builder.basedir,
2111 sendRC=False, timeout=self.timeout,
2112 maxTime=self.maxTime, usePTY=False)
2113 self.command = c
2114 d = c.start()
2115 d.addCallback(self._abandonOnFailure)
2116 d.addCallback(self._didDbInit)
2117 d.addCallback(self._didPull, callback)
2118 return d
2119
2120 def _didDbInit(self, res):
2121 command = [self.monotone, "--db=" + self.full_db_path,
2122 "pull", "--ticker=dot", self.server_addr, self.branch]
2123 c = ShellCommand(self.builder, command, self.builder.basedir,
2124 sendRC=False, timeout=self._pull_timeout,
2125 maxTime=self.maxTime, usePTY=False)
2126 self.sendStatus({"header": "pulling %s from %s\n"
2127 % (self.branch, self.server_addr)})
2128 self.command = c
2129 return c.start()
2130
2131 def _didPull(self, res, callback):
2132 return callback()
2133
2134 registerSlaveCommand("monotone", Monotone, command_version)
2135
2136
2137 class Git(SourceBase):
2138 """Git specific VC operation. In addition to the arguments
2139 handled by SourceBase, this command reads the following keys:
2140
2141 ['repourl'] (required): the upstream GIT repository string
2142 ['branch'] (optional): which version (i.e. branch or tag) to
2143 retrieve. Default: "master".
2144 ['submodules'] (optional): whether to initialize and update
2145 submodules. Default: False.
2146 ['ignore_ignores']: ignore ignores when purging changes.
2147 """
2148
2149 header = "git operation"
2150
2151 def setup(self, args):
2152 SourceBase.setup(self, args)
2153 self.vcexe = getCommand("git")
2154 self.repourl = args['repourl']
2155 self.branch = args.get('branch')
2156 if not self.branch:
2157 self.branch = "master"
2158 self.sourcedata = "%s %s\n" % (self.repourl, self.branch)
2159 self.submodules = args.get('submodules')
2160 self.ignore_ignores = args.get('ignore_ignores', True)
2161
2162 def _fullSrcdir(self):
2163 return os.path.join(self.builder.basedir, self.srcdir)
2164
2165 def _commitSpec(self):
2166 if self.revision:
2167 return self.revision
2168 return self.branch
2169
2170 def sourcedirIsUpdateable(self):
2171 return os.path.isdir(os.path.join(self._fullSrcdir(), ".git"))
2172
2173 def _dovccmd(self, command, cb=None, **kwargs):
2174 c = ShellCommand(self.builder, [self.vcexe] + command, self._fullSrcdir(),
2175 sendRC=False, timeout=self.timeout,
2176 maxTime=self.maxTime, usePTY=False, **kwargs)
2177 self.command = c
2178 d = c.start()
2179 if cb:
2180 d.addCallback(self._abandonOnFailure)
2181 d.addCallback(cb)
2182 return d
2183
2184 # If the repourl matches the sourcedata file, then
2185 # we can say that the sourcedata matches. We can
2186 # ignore branch changes, since Git can work with
2187 # many branches fetched, and we deal with it properly
2188 # in doVCUpdate.
2189 def sourcedataMatches(self):
2190 try:
2191 olddata = self.readSourcedata()
2192 if not olddata.startswith(self.repourl+' '):
2193 return False
2194 except IOError:
2195 return False
2196 return True
2197
2198 def _cleanSubmodules(self, res):
2199 command = ['submodule', 'foreach', 'git', 'clean', '-d', '-f']
2200 if self.ignore_ignores:
2201 command.append('-x')
2202 return self._dovccmd(command)
2203
2204 def _updateSubmodules(self, res):
2205 return self._dovccmd(['submodule', 'update'], self._cleanSubmodules)
2206
2207 def _initSubmodules(self, res):
2208 if self.submodules:
2209 return self._dovccmd(['submodule', 'init'], self._updateSubmodules)
2210 else:
2211 return defer.succeed(0)
2212
2213 def _didHeadCheckout(self, res):
2214 # Rename branch, so that the repo will have the expected branch name
2215 # For further information about this, see the commit message
2216 command = ['branch', '-M', self.branch]
2217 return self._dovccmd(command, self._initSubmodules)
2218
2219 def _didFetch(self, res):
2220 if self.revision:
2221 head = self.revision
2222 else:
2223 head = 'FETCH_HEAD'
2224
2225 # That is not sufficient. git will leave unversioned files and empty
2226 # directories. Clean them up manually in _didReset.
2227 command = ['reset', '--hard', head]
2228 return self._dovccmd(command, self._didHeadCheckout)
2229
2230 # Update first runs "git clean", removing local changes,
2231 # if the branch to be checked out has changed. This, combined
2232 # with the later "git reset", is equivalent to clobbering the repo,
2233 # but it's much more efficient.
2234 def doVCUpdate(self):
2235 try:
2236 # Check to see if our branch has changed
2237 diffbranch = self.sourcedata != self.readSourcedata()
2238 except IOError:
2239 diffbranch = False
2240 if diffbranch:
2241 command = ['git', 'clean', '-f', '-d']
2242 if self.ignore_ignores:
2243 command.append('-x')
2244 c = ShellCommand(self.builder, command, self._fullSrcdir(),
2245 sendRC=False, timeout=self.timeout, usePTY=False)
2246 self.command = c
2247 d = c.start()
2248 d.addCallback(self._abandonOnFailure)
2249 d.addCallback(self._didClean)
2250 return d
2251 return self._didClean(None)
2252
2253 def _doFetch(self, dummy):
2254 # The plus will make sure the repo is moved to the branch's
2255 # head even if it is not a simple "fast-forward"
2256 command = ['fetch', '-t', self.repourl, '+%s' % self.branch]
2257 self.sendStatus({"header": "fetching branch %s from %s\n"
2258 % (self.branch, self.repourl)})
2259 return self._dovccmd(command, self._didFetch)
2260
2261 def _didClean(self, dummy):
2262 # After a clean, try to use the given revision if we have one.
2263 if self.revision:
2264 # We know what revision we want. See if we have it.
2265 d = self._dovccmd(['reset', '--hard', self.revision],
2266 self._initSubmodules)
2267 # If we are unable to reset to the specified version, we
2268 # must do a fetch first and retry.
2269 d.addErrback(self._doFetch)
2270 return d
2271 else:
2272 # No known revision, go grab the latest.
2273 return self._doFetch(None)
2274
2275 def _didInit(self, res):
2276 return self.doVCUpdate()
2277
2278 def doVCFull(self):
2279 os.makedirs(self._fullSrcdir())
2280 return self._dovccmd(['init'], self._didInit)
2281
2282 def parseGotRevision(self):
2283 command = ['rev-parse', 'HEAD']
2284 def _parse(res):
2285 hash = self.command.stdout.strip()
2286 if len(hash) != 40:
2287 return None
2288 return hash
2289 return self._dovccmd(command, _parse, keepStdout=True)
2290
2291 registerSlaveCommand("git", Git, command_version)
2292
2293 class Arch(SourceBase):
2294 """Arch-specific (tla-specific) VC operation. In addition to the
2295 arguments handled by SourceBase, this command reads the following keys:
2296
2297 ['url'] (required): the repository string
2298 ['version'] (required): which version (i.e. branch) to retrieve
2299 ['revision'] (optional): the 'patch-NN' argument to check out
2300 ['archive']: the archive name to use. If None, use the archive's default
2301 ['build-config']: if present, give to 'tla build-config' after checkout
2302 """
2303
2304 header = "arch operation"
2305 buildconfig = None
2306
2307 def setup(self, args):
2308 SourceBase.setup(self, args)
2309 self.vcexe = getCommand("tla")
2310 self.archive = args.get('archive')
2311 self.url = args['url']
2312 self.version = args['version']
2313 self.revision = args.get('revision')
2314 self.buildconfig = args.get('build-config')
2315 self.sourcedata = "%s\n%s\n%s\n" % (self.url, self.version,
2316 self.buildconfig)
2317
2318 def sourcedirIsUpdateable(self):
2319 # Arch cannot roll a directory backwards, so if they ask for a
2320 # specific revision, clobber the directory. Technically this
2321 # could be limited to the cases where the requested revision is
2322 # later than our current one, but it's too hard to extract the
2323 # current revision from the tree.
2324 return (not self.revision and
2325 not self.sourcedirIsPatched() and
2326 os.path.isdir(os.path.join(self.builder.basedir,
2327 self.srcdir, "{arch}")))
2328
2329 def doVCUpdate(self):
2330 # update: possible for mode in ('copy', 'update')
2331 d = os.path.join(self.builder.basedir, self.srcdir)
2332 command = [self.vcexe, 'replay']
2333 if self.revision:
2334 command.append(self.revision)
2335 c = ShellCommand(self.builder, command, d,
2336 sendRC=False, timeout=self.timeout,
2337 maxTime=self.maxTime, usePTY=False)
2338 self.command = c
2339 return c.start()
2340
2341 def doVCFull(self):
2342 # to do a checkout, we must first "register" the archive by giving
2343 # the URL to tla, which will go to the repository at that URL and
2344 # figure out the archive name. tla will tell you the archive name
2345 # when it is done, and all further actions must refer to this name.
2346
2347 command = [self.vcexe, 'register-archive', '--force', self.url]
2348 c = ShellCommand(self.builder, command, self.builder.basedir,
2349 sendRC=False, keepStdout=True, timeout=self.timeout,
2350 maxTime=self.maxTime, usePTY=False)
2351 self.command = c
2352 d = c.start()
2353 d.addCallback(self._abandonOnFailure)
2354 d.addCallback(self._didRegister, c)
2355 return d
2356
2357 def _didRegister(self, res, c):
2358 # find out what tla thinks the archive name is. If the user told us
2359 # to use something specific, make sure it matches.
2360 r = re.search(r'Registering archive: (\S+)\s*$', c.stdout)
2361 if r:
2362 msg = "tla reports archive name is '%s'" % r.group(1)
2363 log.msg(msg)
2364 self.builder.sendUpdate({'header': msg+"\n"})
2365 if self.archive and r.group(1) != self.archive:
2366 msg = (" mismatch, we wanted an archive named '%s'"
2367 % self.archive)
2368 log.msg(msg)
2369 self.builder.sendUpdate({'header': msg+"\n"})
2370 raise AbandonChain(-1)
2371 self.archive = r.group(1)
2372 assert self.archive, "need archive name to continue"
2373 return self._doGet()
2374
2375 def _doGet(self):
2376 ver = self.version
2377 if self.revision:
2378 ver += "--%s" % self.revision
2379 command = [self.vcexe, 'get', '--archive', self.archive,
2380 '--no-pristine',
2381 ver, self.srcdir]
2382 c = ShellCommand(self.builder, command, self.builder.basedir,
2383 sendRC=False, timeout=self.timeout,
2384 maxTime=self.maxTime, usePTY=False)
2385 self.command = c
2386 d = c.start()
2387 d.addCallback(self._abandonOnFailure)
2388 if self.buildconfig:
2389 d.addCallback(self._didGet)
2390 return d
2391
2392 def _didGet(self, res):
2393 d = os.path.join(self.builder.basedir, self.srcdir)
2394 command = [self.vcexe, 'build-config', self.buildconfig]
2395 c = ShellCommand(self.builder, command, d,
2396 sendRC=False, timeout=self.timeout,
2397 maxTime=self.maxTime, usePTY=False)
2398 self.command = c
2399 d = c.start()
2400 d.addCallback(self._abandonOnFailure)
2401 return d
2402
2403 def parseGotRevision(self):
2404 # using code from tryclient.TlaExtractor
2405 # 'tla logs --full' gives us ARCHIVE/BRANCH--REVISION
2406 # 'tla logs' gives us REVISION
2407 command = [self.vcexe, "logs", "--full", "--reverse"]
2408 c = ShellCommand(self.builder, command,
2409 os.path.join(self.builder.basedir, self.srcdir),
2410 environ=self.env,
2411 sendStdout=False, sendStderr=False, sendRC=False,
2412 keepStdout=True, usePTY=False)
2413 d = c.start()
2414 def _parse(res):
2415 tid = c.stdout.split("\n")[0].strip()
2416 slash = tid.index("/")
2417 dd = tid.rindex("--")
2418 #branch = tid[slash+1:dd]
2419 baserev = tid[dd+2:]
2420 return baserev
2421 d.addCallback(_parse)
2422 return d
2423
2424 registerSlaveCommand("arch", Arch, command_version)
2425
2426 class Bazaar(Arch):
2427 """Bazaar (/usr/bin/baz) is an alternative client for Arch repositories.
2428 It is mostly option-compatible, but archive registration is different
2429 enough to warrant a separate Command.
2430
2431 ['archive'] (required): the name of the archive being used
2432 """
2433
2434 def setup(self, args):
2435 Arch.setup(self, args)
2436 self.vcexe = getCommand("baz")
2437 # baz doesn't emit the repository name after registration (and
2438 # grepping through the output of 'baz archives' is too hard), so we
2439 # require the buildmaster configuration to provide both the
2440 # archive name and the URL.
2441 self.archive = args['archive'] # required for Baz
2442 self.sourcedata = "%s\n%s\n%s\n" % (self.url, self.version,
2443 self.buildconfig)
2444
2445 # in _didRegister, the regexp won't match, so we'll stick with the name
2446 # in self.archive
2447
2448 def _doGet(self):
2449 # baz prefers ARCHIVE/VERSION. This will work even if
2450 # my-default-archive is not set.
2451 ver = self.archive + "/" + self.version
2452 if self.revision:
2453 ver += "--%s" % self.revision
2454 command = [self.vcexe, 'get', '--no-pristine',
2455 ver, self.srcdir]
2456 c = ShellCommand(self.builder, command, self.builder.basedir,
2457 sendRC=False, timeout=self.timeout,
2458 maxTime=self.maxTime, usePTY=False)
2459 self.command = c
2460 d = c.start()
2461 d.addCallback(self._abandonOnFailure)
2462 if self.buildconfig:
2463 d.addCallback(self._didGet)
2464 return d
2465
2466 def parseGotRevision(self):
2467 # using code from tryclient.BazExtractor
2468 command = [self.vcexe, "tree-id"]
2469 c = ShellCommand(self.builder, command,
2470 os.path.join(self.builder.basedir, self.srcdir),
2471 environ=self.env,
2472 sendStdout=False, sendStderr=False, sendRC=False,
2473 keepStdout=True, usePTY=False)
2474 d = c.start()
2475 def _parse(res):
2476 tid = c.stdout.strip()
2477 slash = tid.index("/")
2478 dd = tid.rindex("--")
2479 #branch = tid[slash+1:dd]
2480 baserev = tid[dd+2:]
2481 return baserev
2482 d.addCallback(_parse)
2483 return d
2484
2485 registerSlaveCommand("bazaar", Bazaar, command_version)
2486
2487
2488 class Bzr(SourceBase):
2489 """bzr-specific VC operation. In addition to the arguments
2490 handled by SourceBase, this command reads the following keys:
2491
2492 ['repourl'] (required): the Bzr repository string
2493 """
2494
2495 header = "bzr operation"
2496
2497 def setup(self, args):
2498 SourceBase.setup(self, args)
2499 self.vcexe = getCommand("bzr")
2500 self.repourl = args['repourl']
2501 self.sourcedata = "%s\n" % self.repourl
2502 self.revision = self.args.get('revision')
2503 self.forceSharedRepo = args.get('forceSharedRepo')
2504
2505 def sourcedirIsUpdateable(self):
2506 # checking out a specific revision requires a full 'bzr checkout'
2507 return (not self.revision and
2508 not self.sourcedirIsPatched() and
2509 os.path.isdir(os.path.join(self.builder.basedir,
2510 self.srcdir, ".bzr")))
2511
2512 def start(self):
2513 def cont(res):
2514 # Continue with start() method in superclass.
2515 return SourceBase.start(self)
2516
2517 if self.forceSharedRepo:
2518 d = self.doForceSharedRepo()
2519 d.addCallback(cont)
2520 return d
2521 else:
2522 return cont(None)
2523
2524 def doVCUpdate(self):
2525 assert not self.revision
2526 # update: possible for mode in ('copy', 'update')
2527 srcdir = os.path.join(self.builder.basedir, self.srcdir)
2528 command = [self.vcexe, 'update']
2529 c = ShellCommand(self.builder, command, srcdir,
2530 sendRC=False, timeout=self.timeout,
2531 maxTime=self.maxTime, usePTY=False)
2532 self.command = c
2533 return c.start()
2534
2535 def doVCFull(self):
2536 # checkout or export
2537 d = self.builder.basedir
2538 if self.mode == "export":
2539 # exporting in bzr requires a separate directory
2540 return self.doVCExport()
2541 # originally I added --lightweight here, but then 'bzr revno' is
2542 # wrong. The revno reported in 'bzr version-info' is correct,
2543 # however. Maybe this is a bzr bug?
2544 #
2545 # In addition, you cannot perform a 'bzr update' on a repo pulled
2546 # from an HTTP repository that used 'bzr checkout --lightweight'. You
2547 # get a "ERROR: Cannot lock: transport is read only" when you try.
2548 #
2549 # So I won't bother using --lightweight for now.
2550
2551 command = [self.vcexe, 'checkout']
2552 if self.revision:
2553 command.append('--revision')
2554 command.append(str(self.revision))
2555 command.append(self.repourl)
2556 command.append(self.srcdir)
2557
2558 c = ShellCommand(self.builder, command, d,
2559 sendRC=False, timeout=self.timeout,
2560 maxTime=self.maxTime, usePTY=False)
2561 self.command = c
2562 d = c.start()
2563 return d
2564
2565 def doVCExport(self):
2566 tmpdir = os.path.join(self.builder.basedir, "export-temp")
2567 srcdir = os.path.join(self.builder.basedir, self.srcdir)
2568 command = [self.vcexe, 'checkout', '--lightweight']
2569 if self.revision:
2570 command.append('--revision')
2571 command.append(str(self.revision))
2572 command.append(self.repourl)
2573 command.append(tmpdir)
2574 c = ShellCommand(self.builder, command, self.builder.basedir,
2575 sendRC=False, timeout=self.timeout,
2576 maxTime=self.maxTime, usePTY=False)
2577 self.command = c
2578 d = c.start()
2579 def _export(res):
2580 command = [self.vcexe, 'export', srcdir]
2581 c = ShellCommand(self.builder, command, tmpdir,
2582 sendRC=False, timeout=self.timeout,
2583 maxTime=self.maxTime, usePTY=False)
2584 self.command = c
2585 return c.start()
2586 d.addCallback(_export)
2587 return d
2588
2589 def doForceSharedRepo(self):
2590 # Don't send stderr. When there is no shared repo, this might confuse
2591 # users, as they will see a bzr error message. But having no shared
2592 # repo is not an error, just an indication that we need to make one.
2593 c = ShellCommand(self.builder, [self.vcexe, 'info', '.'],
2594 self.builder.basedir,
2595 sendStderr=False, sendRC=False, usePTY=False)
2596 d = c.start()
2597 def afterCheckSharedRepo(res):
2598 if type(res) is int and res != 0:
2599 log.msg("No shared repo found, creating it")
2600 # bzr info fails, try to create shared repo.
2601 c = ShellCommand(self.builder, [self.vcexe, 'init-repo', '.'],
2602 self.builder.basedir,
2603 sendRC=False, usePTY=False)
2604 self.command = c
2605 return c.start()
2606 else:
2607 return defer.succeed(res)
2608 d.addCallback(afterCheckSharedRepo)
2609 return d
2610
2611 def get_revision_number(self, out):
2612 # it feels like 'bzr revno' sometimes gives different results than
2613 # the 'revno:' line from 'bzr version-info', and the one from
2614 # version-info is more likely to be correct.
2615 for line in out.split("\n"):
2616 colon = line.find(":")
2617 if colon != -1:
2618 key, value = line[:colon], line[colon+2:]
2619 if key == "revno":
2620 return int(value)
2621 raise ValueError("unable to find revno: in bzr output: '%s'" % out)
2622
2623 def parseGotRevision(self):
2624 command = [self.vcexe, "version-info"]
2625 c = ShellCommand(self.builder, command,
2626 os.path.join(self.builder.basedir, self.srcdir),
2627 environ=self.env,
2628 sendStdout=False, sendStderr=False, sendRC=False,
2629 keepStdout=True, usePTY=False)
2630 d = c.start()
2631 def _parse(res):
2632 try:
2633 return self.get_revision_number(c.stdout)
2634 except ValueError:
2635 msg =("Bzr.parseGotRevision unable to parse output "
2636 "of bzr version-info: '%s'" % c.stdout.strip())
2637 log.msg(msg)
2638 self.sendStatus({'header': msg + "\n"})
2639 return None
2640 d.addCallback(_parse)
2641 return d
2642
2643 registerSlaveCommand("bzr", Bzr, command_version)
2644
2645 class Mercurial(SourceBase):
2646 """Mercurial specific VC operation. In addition to the arguments
2647 handled by SourceBase, this command reads the following keys:
2648
2649 ['repourl'] (required): the Mercurial repository string
2650 ['clobberOnBranchChange']: Document me. See ticket #462.
2651 """
2652
2653 header = "mercurial operation"
2654
2655 def setup(self, args):
2656 SourceBase.setup(self, args)
2657 self.vcexe = getCommand("hg")
2658 self.repourl = args['repourl']
2659 self.clobberOnBranchChange = args.get('clobberOnBranchChange', True)
2660 self.sourcedata = "%s\n" % self.repourl
2661 self.branchType = args.get('branchType', 'dirname')
2662 self.stdout = ""
2663 self.stderr = ""
2664
2665 def sourcedirIsUpdateable(self):
2666 return os.path.isdir(os.path.join(self.builder.basedir,
2667 self.srcdir, ".hg"))
2668
2669 def doVCUpdate(self):
2670 d = os.path.join(self.builder.basedir, self.srcdir)
2671 command = [self.vcexe, 'pull', '--verbose', self.repourl]
2672 c = ShellCommand(self.builder, command, d,
2673 sendRC=False, timeout=self.timeout,
2674 maxTime=self.maxTime, keepStdout=True, usePTY=False)
2675 self.command = c
2676 d = c.start()
2677 d.addCallback(self._handleEmptyUpdate)
2678 d.addCallback(self._update)
2679 return d
2680
2681 def _handleEmptyUpdate(self, res):
2682 if type(res) is int and res == 1:
2683 if self.command.stdout.find("no changes found") != -1:
2684 # 'hg pull', when it doesn't have anything to do, exits with
2685 # rc=1, and there appears to be no way to shut this off. It
2686 # emits a distinctive message to stdout, though. So catch
2687 # this and pretend that it completed successfully.
2688 return 0
2689 return res
2690
2691 def doVCFull(self):
2692 d = os.path.join(self.builder.basedir, self.srcdir)
2693 command = [self.vcexe, 'clone', '--verbose', '--noupdate']
2694
2695 # if we got a revision, are clobbering, and branchType is 'dirname', clone only that specific revision
2696 # (otherwise, do full clone to re-use .hg dir for subsequent builds)
2697 if self.args.get('revision') and self.mode == 'clobber' and self.branchType == 'dirname':
2698 command.extend(['--rev', self.args.get('revision')])
2699 command.extend([self.repourl, d])
2700
2701 c = ShellCommand(self.builder, command, self.builder.basedir,
2702 sendRC=False, timeout=self.timeout,
2703 maxTime=self.maxTime, usePTY=False)
2704 self.command = c
2705 cmd1 = c.start()
2706 cmd1.addCallback(self._update)
2707 return cmd1
2708
2709 def _clobber(self, dummy, dirname):
2710 def _vcfull(res):
2711 return self.doVCFull()
2712
2713 c = self.doClobber(dummy, dirname)
2714 c.addCallback(_vcfull)
2715
2716 return c
2717
2718 def _purge(self, dummy, dirname):
2719 d = os.path.join(self.builder.basedir, self.srcdir)
2720 purge = [self.vcexe, 'purge', '--all']
2721 purgeCmd = ShellCommand(self.builder, purge, d,
2722 sendStdout=False, sendStderr=False,
2723 keepStdout=True, keepStderr=True, usePTY=False)
2724
2725 def _clobber(res):
2726 if res != 0:
2727 # purge failed, we need to switch to a classic clobber
2728 msg = "'hg purge' failed: %s\n%s. Clobbering." % (purgeCmd.stdou t, purgeCmd.stderr)
2729 self.sendStatus({'header': msg + "\n"})
2730 log.msg(msg)
2731
2732 return self._clobber(dummy, dirname)
2733
2734 # Purge was a success, then we need to update
2735 return self._update2(res)
2736
2737 p = purgeCmd.start()
2738 p.addCallback(_clobber)
2739 return p
2740
2741 def _update(self, res):
2742 if res != 0:
2743 return res
2744
2745 # compare current branch to update
2746 self.update_branch = self.args.get('branch', 'default')
2747
2748 d = os.path.join(self.builder.basedir, self.srcdir)
2749 parentscmd = [self.vcexe, 'identify', '--num', '--branch']
2750 cmd = ShellCommand(self.builder, parentscmd, d,
2751 sendStdout=False, sendStderr=False,
2752 keepStdout=True, keepStderr=True, usePTY=False)
2753
2754 self.clobber = None
2755
2756 def _parseIdentify(res):
2757 if res != 0:
2758 msg = "'hg identify' failed: %s\n%s" % (cmd.stdout, cmd.stderr)
2759 self.sendStatus({'header': msg + "\n"})
2760 log.msg(msg)
2761 return res
2762
2763 log.msg('Output: %s' % cmd.stdout)
2764
2765 match = re.search(r'^(.+) (.+)$', cmd.stdout)
2766 assert match
2767
2768 rev = match.group(1)
2769 current_branch = match.group(2)
2770
2771 if rev == '-1':
2772 msg = "Fresh hg repo, don't worry about in-repo branch name"
2773 log.msg(msg)
2774
2775 elif self.sourcedirIsPatched():
2776 self.clobber = self._purge
2777
2778 elif self.update_branch != current_branch:
2779 msg = "Working dir is on in-repo branch '%s' and build needs '%s '." % (current_branch, self.update_branch)
2780 if self.clobberOnBranchChange:
2781 msg += ' Clobbering.'
2782 else:
2783 msg += ' Updating.'
2784
2785 self.sendStatus({'header': msg + "\n"})
2786 log.msg(msg)
2787
2788 # Clobbers only if clobberOnBranchChange is set
2789 if self.clobberOnBranchChange:
2790 self.clobber = self._purge
2791
2792 else:
2793 msg = "Working dir on same in-repo branch as build (%s)." % (cur rent_branch)
2794 log.msg(msg)
2795
2796 return 0
2797
2798 def _checkRepoURL(res):
2799 parentscmd = [self.vcexe, 'paths', 'default']
2800 cmd2 = ShellCommand(self.builder, parentscmd, d,
2801 sendStdout=False, sendStderr=False,
2802 keepStdout=True, keepStderr=True, usePTY=False)
2803
2804 def _parseRepoURL(res):
2805 if res == 1:
2806 if "not found!" == cmd2.stderr.strip():
2807 msg = "hg default path not set. Not checking repo url fo r clobber test"
2808 log.msg(msg)
2809 return 0
2810 else:
2811 msg = "'hg paths default' failed: %s\n%s" % (cmd2.stdout , cmd2.stderr)
2812 log.msg(msg)
2813 return 1
2814
2815 oldurl = cmd2.stdout.strip()
2816
2817 log.msg("Repo cloned from: '%s'" % oldurl)
2818
2819 if sys.platform == "win32":
2820 oldurl = oldurl.lower().replace('\\', '/')
2821 repourl = self.repourl.lower().replace('\\', '/')
2822 if repourl.startswith('file://'):
2823 repourl = repourl.split('file://')[1]
2824 else:
2825 repourl = self.repourl
2826
2827 oldurl = remove_userpassword(oldurl)
2828 repourl = remove_userpassword(repourl)
2829
2830 if oldurl != repourl:
2831 self.clobber = self._clobber
2832 msg = "RepoURL changed from '%s' in wc to '%s' in update. Cl obbering" % (oldurl, repourl)
2833 log.msg(msg)
2834
2835 return 0
2836
2837 c = cmd2.start()
2838 c.addCallback(_parseRepoURL)
2839 return c
2840
2841 def _maybeClobber(res):
2842 if self.clobber:
2843 msg = "Clobber flag set. Doing clobbering"
2844 log.msg(msg)
2845
2846 def _vcfull(res):
2847 return self.doVCFull()
2848
2849 return self.clobber(None, self.srcdir)
2850
2851 return 0
2852
2853 c = cmd.start()
2854 c.addCallback(_parseIdentify)
2855 c.addCallback(_checkRepoURL)
2856 c.addCallback(_maybeClobber)
2857 c.addCallback(self._update2)
2858 return c
2859
2860 def _update2(self, res):
2861 d = os.path.join(self.builder.basedir, self.srcdir)
2862
2863 updatecmd=[self.vcexe, 'update', '--clean', '--repository', d]
2864 if self.args.get('revision'):
2865 updatecmd.extend(['--rev', self.args['revision']])
2866 else:
2867 updatecmd.extend(['--rev', self.args.get('branch', 'default')])
2868 self.command = ShellCommand(self.builder, updatecmd,
2869 self.builder.basedir, sendRC=False,
2870 timeout=self.timeout, maxTime=self.maxTime, usePTY=False)
2871 return self.command.start()
2872
2873 def parseGotRevision(self):
2874 # we use 'hg identify' to find out what we wound up with
2875 command = [self.vcexe, "identify"]
2876 c = ShellCommand(self.builder, command,
2877 os.path.join(self.builder.basedir, self.srcdir),
2878 environ=self.env,
2879 sendStdout=False, sendStderr=False, sendRC=False,
2880 keepStdout=True, usePTY=False)
2881 d = c.start()
2882 def _parse(res):
2883 m = re.search(r'^(\w+)', c.stdout)
2884 return m.group(1)
2885 d.addCallback(_parse)
2886 return d
2887
2888 registerSlaveCommand("hg", Mercurial, command_version)
2889
2890
2891 class P4Base(SourceBase):
2892 """Base class for P4 source-updaters
2893
2894 ['p4port'] (required): host:port for server to access
2895 ['p4user'] (optional): user to use for access
2896 ['p4passwd'] (optional): passwd to try for the user
2897 ['p4client'] (optional): client spec to use
2898 """
2899 def setup(self, args):
2900 SourceBase.setup(self, args)
2901 self.p4port = args['p4port']
2902 self.p4client = args['p4client']
2903 self.p4user = args['p4user']
2904 self.p4passwd = args['p4passwd']
2905
2906 def parseGotRevision(self):
2907 # Executes a p4 command that will give us the latest changelist number
2908 # of any file under the current (or default) client:
2909 command = ['p4']
2910 if self.p4port:
2911 command.extend(['-p', self.p4port])
2912 if self.p4user:
2913 command.extend(['-u', self.p4user])
2914 if self.p4passwd:
2915 command.extend(['-P', Obfuscated(self.p4passwd, 'XXXXXXXX')])
2916 if self.p4client:
2917 command.extend(['-c', self.p4client])
2918 # add '-s submitted' for bug #626
2919 command.extend(['changes', '-s', 'submitted', '-m', '1', '#have'])
2920 c = ShellCommand(self.builder, command, self.builder.basedir,
2921 environ=self.env, timeout=self.timeout,
2922 maxTime=self.maxTime, sendStdout=True,
2923 sendStderr=False, sendRC=False, keepStdout=True,
2924 usePTY=False)
2925 self.command = c
2926 d = c.start()
2927
2928 def _parse(res):
2929 # 'p4 -c client-name changes -s submitted -m 1 "#have"' will produce output like:
2930 # "Change 28147 on 2008/04/07 by p4user@hostname..."
2931 # The number after "Change" is the one we want.
2932 m = re.match('Change\s+(\d+)\s+', c.stdout)
2933 if m:
2934 return m.group(1)
2935 return None
2936 d.addCallback(_parse)
2937 return d
2938
2939
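# Standalone sketch (the helper name is illustrative) of the changelist extraction
# done in P4Base.parseGotRevision() above, based on the output format quoted in
# that method's comment ("Change 28147 on 2008/04/07 by p4user@hostname ...").
def _example_parse_p4_change(changes_output):
    import re
    m = re.match(r'Change\s+(\d+)\s+', changes_output)
    return m.group(1) if m else None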
2940 class P4(P4Base):
2941 """A P4 source-updater.
2942
2943 ['p4port'] (required): host:port for server to access
2944 ['p4user'] (optional): user to use for access
2945 ['p4passwd'] (optional): passwd to try for the user
2946 ['p4client'] (optional): client spec to use
2947 ['p4extra_views'] (optional): additional client views to use
2948 """
2949
2950 header = "p4"
2951
2952 def setup(self, args):
2953 P4Base.setup(self, args)
2954 self.p4base = args['p4base']
2955 self.p4extra_views = args['p4extra_views']
2956 self.p4mode = args['mode']
2957 self.p4branch = args['branch']
2958
2959 self.sourcedata = str([
2960 # Perforce server.
2961 self.p4port,
2962
2963 # Client spec.
2964 self.p4client,
2965
2966 # Depot side of view spec.
2967 self.p4base,
2968 self.p4branch,
2969 self.p4extra_views,
2970
2971 # Local side of view spec (srcdir is made from these).
2972 self.builder.basedir,
2973 self.mode,
2974 self.workdir
2975 ])
2976
2977
2978 def sourcedirIsUpdateable(self):
2979 # We assume our client spec is still around.
2980 # We just say we aren't updateable if the dir doesn't exist so we
2981 # don't get ENOENT checking the sourcedata.
2982 return (not self.sourcedirIsPatched() and
2983 os.path.isdir(os.path.join(self.builder.basedir,
2984 self.srcdir)))
2985
2986 def doVCUpdate(self):
2987 return self._doP4Sync(force=False)
2988
2989 def _doP4Sync(self, force):
2990 command = ['p4']
2991
2992 if self.p4port:
2993 command.extend(['-p', self.p4port])
2994 if self.p4user:
2995 command.extend(['-u', self.p4user])
2996 if self.p4passwd:
2997 command.extend(['-P', Obfuscated(self.p4passwd, 'XXXXXXXX')])
2998 if self.p4client:
2999 command.extend(['-c', self.p4client])
3000 command.extend(['sync'])
3001 if force:
3002 command.extend(['-f'])
3003 if self.revision:
3004 command.extend(['@' + str(self.revision)])
3005 env = {}
3006 c = ShellCommand(self.builder, command, self.builder.basedir,
3007 environ=env, sendRC=False, timeout=self.timeout,
3008 maxTime=self.maxTime, keepStdout=True, usePTY=False)
3009 self.command = c
3010 d = c.start()
3011 d.addCallback(self._abandonOnFailure)
3012 return d
3013
3014
3015 def doVCFull(self):
3016 env = {}
3017 command = ['p4']
3018 client_spec = ''
3019 client_spec += "Client: %s\n\n" % self.p4client
3020 client_spec += "Owner: %s\n\n" % self.p4user
3021 client_spec += "Description:\n\tCreated by %s\n\n" % self.p4user
3022 client_spec += "Root:\t%s\n\n" % self.builder.basedir
3023 client_spec += "Options:\tallwrite rmdir\n\n"
3024 client_spec += "LineEnd:\tlocal\n\n"
3025
3026 # Setup a view
3027 client_spec += "View:\n\t%s" % (self.p4base)
3028 if self.p4branch:
3029 client_spec += "%s/" % (self.p4branch)
3030 client_spec += "... //%s/%s/...\n" % (self.p4client, self.srcdir)
3031 if self.p4extra_views:
3032 for k, v in self.p4extra_views:
3033 client_spec += "\t%s/... //%s/%s%s/...\n" % (k, self.p4client,
3034 self.srcdir, v)
3035 if self.p4port:
3036 command.extend(['-p', self.p4port])
3037 if self.p4user:
3038 command.extend(['-u', self.p4user])
3039 if self.p4passwd:
3040 command.extend(['-P', Obfuscated(self.p4passwd, 'XXXXXXXX')])
3041 command.extend(['client', '-i'])
3042 log.msg(client_spec)
3043 c = ShellCommand(self.builder, command, self.builder.basedir,
3044 environ=env, sendRC=False, timeout=self.timeout,
3045 maxTime=self.maxTime, initialStdin=client_spec,
3046 usePTY=False)
3047 self.command = c
3048 d = c.start()
3049 d.addCallback(self._abandonOnFailure)
3050 d.addCallback(lambda _: self._doP4Sync(force=True))
3051 return d
3052
3053 def parseGotRevision(self):
3054 rv = None
3055 if self.revision:
3056 rv = str(self.revision)
3057 return rv
3058
3059 registerSlaveCommand("p4", P4, command_version)
3060
3061
3062 class P4Sync(P4Base):
3063 """A partial P4 source-updater. Requires manual setup of a per-slave P4
3064 environment. The only thing which comes from the master is P4PORT.
3065 'mode' is required to be 'copy'.
3066
3067 ['p4port'] (required): host:port for server to access
3068 ['p4user'] (optional): user to use for access
3069 ['p4passwd'] (optional): passwd to try for the user
3070 ['p4client'] (optional): client spec to use
3071 """
3072
3073 header = "p4 sync"
3074
3075 def setup(self, args):
3076 P4Base.setup(self, args)
3077 self.vcexe = getCommand("p4")
3078
3079 def sourcedirIsUpdateable(self):
3080 return True
3081
3082 def _doVC(self, force):
3083 d = os.path.join(self.builder.basedir, self.srcdir)
3084 command = [self.vcexe]
3085 if self.p4port:
3086 command.extend(['-p', self.p4port])
3087 if self.p4user:
3088 command.extend(['-u', self.p4user])
3089 if self.p4passwd:
3090 command.extend(['-P', Obfuscated(self.p4passwd, 'XXXXXXXX')])
3091 if self.p4client:
3092 command.extend(['-c', self.p4client])
3093 command.extend(['sync'])
3094 if force:
3095 command.extend(['-f'])
3096 if self.revision:
3097 command.extend(['@' + self.revision])
3098 env = {}
3099 c = ShellCommand(self.builder, command, d, environ=env,
3100 sendRC=False, timeout=self.timeout,
3101 maxTime=self.maxTime, usePTY=False)
3102 self.command = c
3103 return c.start()
3104
3105 def doVCUpdate(self):
3106 return self._doVC(force=False)
3107
3108 def doVCFull(self):
3109 return self._doVC(force=True)
3110
3111 def parseGotRevision(self):
3112 rv = None
3113 if self.revision:
3114 rv = str(self.revision)
3115 return rv
3116
3117 registerSlaveCommand("p4sync", P4Sync, command_version)