OLD | NEW |
| (Empty) |
1 # -*- test-case-name: buildbot.test.test_slaves -*- | |
2 | |
3 # Portions copyright Canonical Ltd. 2009 | |
4 | |
5 from twisted.trial import unittest | |
6 from twisted.internet import defer, reactor | |
7 from twisted.python import log, runtime, failure | |
8 | |
9 from buildbot.buildslave import AbstractLatentBuildSlave | |
10 from buildbot.test.runutils import RunMixin | |
11 from buildbot.sourcestamp import SourceStamp | |
12 from buildbot.process.base import BuildRequest | |
13 from buildbot.status.builder import SUCCESS | |
14 from buildbot.status import mail | |
15 from buildbot.slave import bot | |
16 | |
17 config_1 = """ | |
18 from buildbot.process import factory | |
19 from buildbot.steps import dummy | |
20 from buildbot.buildslave import BuildSlave | |
21 from buildbot.config import BuilderConfig | |
22 s = factory.s | |
23 | |
24 BuildmasterConfig = c = {} | |
25 c['slaves'] = [BuildSlave('bot1', 'sekrit'), BuildSlave('bot2', 'sekrit'), | |
26 BuildSlave('bot3', 'sekrit')] | |
27 c['schedulers'] = [] | |
28 c['slavePortnum'] = 0 | |
29 c['schedulers'] = [] | |
30 | |
31 f1 = factory.BuildFactory([s(dummy.RemoteDummy, timeout=1)]) | |
32 f2 = factory.BuildFactory([s(dummy.RemoteDummy, timeout=2)]) | |
33 f3 = factory.BuildFactory([s(dummy.RemoteDummy, timeout=3)]) | |
34 f4 = factory.BuildFactory([s(dummy.RemoteDummy, timeout=5)]) | |
35 | |
36 c['builders'] = [ | |
37 BuilderConfig(name='b1', slavenames=['bot1', 'bot2', 'bot3'], factory=f1), | |
38 ] | |
39 """ | |
40 | |
41 config_2 = config_1 + """ | |
42 | |
43 c['builders'] = [ | |
44 BuilderConfig(name='b1', slavenames=['bot1', 'bot2', 'bot3'], factory=f2), | |
45 ] | |
46 | |
47 """ | |
48 | |
49 config_busyness = config_1 + """ | |
50 c['builders'] = [ | |
51 BuilderConfig(name='b1', slavenames=['bot1', ], factory=f3), | |
52 BuilderConfig(name='b2', slavenames=['bot1', ], factory=f4), | |
53 ] | |
54 """ | |
55 | |
56 class Slave(RunMixin, unittest.TestCase): | |
57 | |
58 def setUp(self): | |
59 RunMixin.setUp(self) | |
60 self.master.loadConfig(config_1) | |
61 self.master.startService() | |
62 d = self.connectSlave(["b1"]) | |
63 d.addCallback(lambda res: self.connectSlave(["b1"], "bot2")) | |
64 return d | |
65 | |
66 def doBuild(self, buildername): | |
67 br = BuildRequest("forced", SourceStamp(), 'test_builder') | |
68 d = br.waitUntilFinished() | |
69 self.control.getBuilder(buildername).requestBuild(br) | |
70 return d | |
71 | |
72 def testSequence(self): | |
73 # make sure both slaves appear in the list. | |
74 attached_slaves = [c for c in self.master.botmaster.slaves.values() | |
75 if c.slave] | |
76 self.failUnlessEqual(len(attached_slaves), 2) | |
77 b = self.master.botmaster.builders["b1"] | |
78 self.failUnlessEqual(len(b.slaves), 2) | |
79 | |
80 # since the current scheduling algorithm is simple and does not | |
81 # rotate or attempt any sort of load-balancing, two builds in | |
82 # sequence should both use the first slave. This may change later if | |
83 # we move to a more sophisticated scheme. | |
84 b.CHOOSE_SLAVES_RANDOMLY = False | |
85 | |
86 d = self.doBuild("b1") | |
87 d.addCallback(self._testSequence_1) | |
88 return d | |
89 def _testSequence_1(self, res): | |
90 self.failUnlessEqual(res.getResults(), SUCCESS) | |
91 self.failUnlessEqual(res.getSlavename(), "bot1") | |
92 | |
93 d = self.doBuild("b1") | |
94 d.addCallback(self._testSequence_2) | |
95 return d | |
96 def _testSequence_2(self, res): | |
97 self.failUnlessEqual(res.getSlavename(), "bot1") | |
98 | |
99 | |
100 def testSimultaneous(self): | |
101 # make sure we can actually run two builds at the same time | |
102 d1 = self.doBuild("b1") | |
103 d2 = self.doBuild("b1") | |
104 d1.addCallback(self._testSimultaneous_1, d2) | |
105 return d1 | |
106 def _testSimultaneous_1(self, res, d2): | |
107 self.failUnlessEqual(res.getResults(), SUCCESS) | |
108 b1_slavename = res.getSlavename() | |
109 d2.addCallback(self._testSimultaneous_2, b1_slavename) | |
110 return d2 | |
111 def _testSimultaneous_2(self, res, b1_slavename): | |
112 self.failUnlessEqual(res.getResults(), SUCCESS) | |
113 b2_slavename = res.getSlavename() | |
114 # make sure the two builds were run by different slaves | |
115 slavenames = [b1_slavename, b2_slavename] | |
116 slavenames.sort() | |
117 self.failUnlessEqual(slavenames, ["bot1", "bot2"]) | |
118 | |
119 def testFallback1(self): | |
120 # detach the first slave, verify that a build is run using the second | |
121 # slave instead | |
122 d = self.shutdownSlave("bot1", "b1") | |
123 d.addCallback(self._testFallback1_1) | |
124 return d | |
125 def _testFallback1_1(self, res): | |
126 attached_slaves = [c for c in self.master.botmaster.slaves.values() | |
127 if c.slave] | |
128 self.failUnlessEqual(len(attached_slaves), 1) | |
129 self.failUnlessEqual(len(self.master.botmaster.builders["b1"].slaves), | |
130 1) | |
131 d = self.doBuild("b1") | |
132 d.addCallback(self._testFallback1_2) | |
133 return d | |
134 def _testFallback1_2(self, res): | |
135 self.failUnlessEqual(res.getResults(), SUCCESS) | |
136 self.failUnlessEqual(res.getSlavename(), "bot2") | |
137 | |
138 def testFallback2(self): | |
139 # Disable the first slave, so that a slaveping will time out. Then | |
140 # start a build, and verify that the non-failing (second) one is | |
141 # claimed for the build, and that the failing one is removed from the | |
142 # list. | |
143 | |
144 b1 = self.master.botmaster.builders["b1"] | |
145 # choose slaves deterministically so the disabled bot1 is tried first | |
146 assert b1.CHOOSE_SLAVES_RANDOMLY | |
147 b1.CHOOSE_SLAVES_RANDOMLY = False | |
148 self.disappearSlave("bot1", "b1", allowReconnect=False) | |
149 d = self.doBuild("b1") | |
150 self.killSlave("bot1", "b1") | |
151 d.addCallback(self._testFallback2_1) | |
152 return d | |
153 def _testFallback2_1(self, res): | |
154 self.failUnlessEqual(res.getResults(), SUCCESS) | |
155 self.failUnlessEqual(res.getSlavename(), "bot2") | |
156 b1slaves = self.master.botmaster.builders["b1"].slaves | |
157 self.failUnlessEqual(len(b1slaves), 1, "whoops: %s" % (b1slaves,)) | |
158 self.failUnlessEqual(b1slaves[0].slave.slavename, "bot2") | |
159 | |
160 | |
161 def notFinished(self, brs): | |
162 # utility method | |
163 builds = brs.getBuilds() | |
164 self.failIf(len(builds) > 1) | |
165 if builds: | |
166 self.failIf(builds[0].isFinished()) | |
167 | |
168 def testDontClaimPingingSlave(self): | |
169 # have two slaves connect for the same builder. Do something to the | |
170 # first one so that slavepings are delayed (but do not fail | |
171 # outright). | |
172 timers = [] | |
173 self.slaves['bot1'].debugOpts["stallPings"] = (10, timers) | |
174 br = BuildRequest("forced", SourceStamp(), 'test_builder') | |
175 d1 = br.waitUntilFinished() | |
176 self.master.botmaster.builders["b1"].CHOOSE_SLAVES_RANDOMLY = False | |
177 self.control.getBuilder("b1").requestBuild(br) | |
178 s1 = br.status # this is a BuildRequestStatus | |
179 # give it a chance to start pinging | |
180 d2 = defer.Deferred() | |
181 d2.addCallback(self._testDontClaimPingingSlave_1, d1, s1, timers) | |
182 reactor.callLater(1, d2.callback, None) | |
183 return d2 | |
184 def _testDontClaimPingingSlave_1(self, res, d1, s1, timers): | |
185 # now the first build is running (waiting on the ping), so start the | |
186 # second build. This should claim the second slave, not the first, | |
187 # because the first is busy doing the ping. | |
188 self.notFinished(s1) | |
189 d3 = self.doBuild("b1") | |
190 d3.addCallback(self._testDontClaimPingingSlave_2, d1, s1, timers) | |
191 return d3 | |
192 def _testDontClaimPingingSlave_2(self, res, d1, s1, timers): | |
193 self.failUnlessEqual(res.getSlavename(), "bot2") | |
194 self.notFinished(s1) | |
195 # now let the ping complete | |
196 self.failUnlessEqual(len(timers), 1) | |
197 timers[0].reset(0) | |
198 d1.addCallback(self._testDontClaimPingingSlave_3) | |
199 return d1 | |
200 def _testDontClaimPingingSlave_3(self, res): | |
201 self.failUnlessEqual(res.getSlavename(), "bot1") | |
202 | |
203 class FakeLatentBuildSlave(AbstractLatentBuildSlave): | |
204 | |
205 testcase = None | |
206 stop_wait = None | |
207 start_message = None | |
208 stopped = testing_substantiation_timeout = False | |
209 | |
210 def start_instance(self): | |
211 # responsible for starting instance that will try to connect with | |
212 # this master | |
213 # simulate having to do some work. | |
214 d = defer.Deferred() | |
215 if not self.testing_substantiation_timeout: | |
216 reactor.callLater(0, self._start_instance, d) | |
217 return d | |
218 | |
219 def _start_instance(self, d): | |
220 self.testcase.connectOneSlave(self.slavename) | |
221 d.callback(self.start_message) | |
222 | |
223 def stop_instance(self, fast=False): | |
224 # responsible for shutting down instance | |
225 # we're going to emulate dropping off the net. | |
226 | |
227 # simulate this by replacing the slave Broker's .dataReceived method | |
228 # with one that just throws away all data. | |
229 self.fast_stop_request = fast | |
230 if self.slavename not in self.testcase.slaves: | |
231 assert self.testing_substantiation_timeout | |
232 self.stopped = True | |
233 return defer.succeed(None) | |
234 d = defer.Deferred() | |
235 if self.stop_wait is None: | |
236 self._stop_instance(d) | |
237 else: | |
238 reactor.callLater(self.stop_wait, self._stop_instance, d) | |
239 return d | |
240 | |
241 def _stop_instance(self, d): | |
242 try: | |
243 s = self.testcase.slaves.pop(self.slavename) | |
244 except KeyError: | |
245 pass | |
246 else: | |
247 def discard(data): | |
248 pass | |
249 bot = s.getServiceNamed("bot") | |
250 for buildername in self.slavebuilders: | |
251 remote = bot.builders[buildername].remote | |
252 if remote is None: | |
253 continue | |
254 broker = remote.broker | |
255 broker.dataReceived = discard # seal its ears | |
256 broker.transport.write = discard # and take away its voice | |
257 # also discourage it from reconnecting once the connection goes away | |
258 s.bf.continueTrying = False | |
259 # stop the service for cleanliness | |
260 s.stopService() | |
261 d.callback(None) | |
262 | |
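For comparison with the fake above, a real AbstractLatentBuildSlave subclass follows the same contract that FakeLatentBuildSlave exercises: start_instance() returns a Deferred whose callback value (optionally a list of strings) is appended to the 'substantiate' status event, stop_instance(fast=False) returns a Deferred that fires once shutdown is underway, and the launched machine is expected to run a buildslave that connects back to the master on its own. A minimal sketch, with a hypothetical _FakeCloud client standing in for a real provider SDK (nothing below is an existing buildbot class):

```python
from twisted.internet import defer, threads

from buildbot.buildslave import AbstractLatentBuildSlave


class _FakeCloud:
    """Hypothetical stand-in for a provider SDK; replace with a real client."""
    class Instance:
        id = 'i-00000000'
        def terminate(self):
            pass
    def launch(self, image_id):
        return self.Instance()


class CloudLatentBuildSlave(AbstractLatentBuildSlave):
    """Sketch of a latent slave that boots a machine via a provider client."""

    def __init__(self, name, password, client=None, image_id=None, **kwargs):
        AbstractLatentBuildSlave.__init__(self, name, password, **kwargs)
        self.client = client or _FakeCloud()
        self.image_id = image_id
        self.instance = None

    def start_instance(self):
        # must return a Deferred; its callback value (a list of strings
        # here) is added to the builder's 'substantiate' status event
        return threads.deferToThread(self._start)

    def _start(self):
        self.instance = self.client.launch(self.image_id)
        return [self.instance.id]

    def stop_instance(self, fast=False):
        # must return a Deferred that fires once shutdown has been requested
        if self.instance is None:
            return defer.succeed(None)
        instance, self.instance = self.instance, None
        return threads.deferToThread(instance.terminate)
```

Wiring such a slave into a config is the same as for FakeLatentBuildSlave in latent_config below: it goes straight into c['slaves'] alongside ordinary BuildSlave instances.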
263 latent_config = """ | |
264 from buildbot.process import factory | |
265 from buildbot.steps import dummy | |
266 from buildbot.buildslave import BuildSlave | |
267 from buildbot.test.test_slaves import FakeLatentBuildSlave | |
268 from buildbot.config import BuilderConfig | |
269 s = factory.s | |
270 | |
271 BuildmasterConfig = c = {} | |
272 c['slaves'] = [FakeLatentBuildSlave('bot1', 'sekrit', | |
273 ), | |
274 FakeLatentBuildSlave('bot2', 'sekrit', | |
275 ), | |
276 BuildSlave('bot3', 'sekrit')] | |
277 c['schedulers'] = [] | |
278 c['slavePortnum'] = 0 | |
279 c['schedulers'] = [] | |
280 | |
281 f1 = factory.BuildFactory([s(dummy.RemoteDummy, timeout=1)]) | |
282 f2 = factory.BuildFactory([s(dummy.RemoteDummy, timeout=2)]) | |
283 f3 = factory.BuildFactory([s(dummy.RemoteDummy, timeout=3)]) | |
284 f4 = factory.BuildFactory([s(dummy.RemoteDummy, timeout=5)]) | |
285 | |
286 c['builders'] = [ | |
287 BuilderConfig(name='b1', slavenames=['bot1', 'bot2', 'bot3'], factory=f1), | |
288 ] | |
289 """ | |
290 | |
291 | |
292 class LatentSlave(RunMixin, unittest.TestCase): | |
293 | |
294 def setUp(self): | |
295 # debugging | |
296 #import twisted.internet.base | |
297 #twisted.internet.base.DelayedCall.debug = True | |
298 # debugging | |
299 RunMixin.setUp(self) | |
300 self.master.loadConfig(latent_config) | |
301 self.master.startService() | |
302 self.bot1 = self.master.botmaster.slaves['bot1'] | |
303 self.bot2 = self.master.botmaster.slaves['bot2'] | |
304 self.bot3 = self.master.botmaster.slaves['bot3'] | |
305 self.bot1.testcase = self | |
306 self.bot2.testcase = self | |
307 self.b1 = self.master.botmaster.builders['b1'] | |
308 | |
309 def doBuild(self, buildername): | |
310 br = BuildRequest("forced", SourceStamp(), 'test_builder') | |
311 d = br.waitUntilFinished() | |
312 self.control.getBuilder(buildername).requestBuild(br) | |
313 return d | |
314 | |
315 def testSequence(self): | |
316 # make sure both slaves appear in the builder. This happens | |
317 # automatically, without any attaching. | |
318 self.assertEqual(len(self.b1.slaves), 2) | |
319 slavenames = [sb.slave.slavename for sb in self.b1.slaves] | |
320 slavenames.sort() | |
321 self.assertEqual(slavenames, | |
322 ['bot1', 'bot2']) | |
323 # These have not substantiated | |
324 self.assertEqual([sb.slave.substantiated for sb in self.b1.slaves], | |
325 [False, False]) | |
326 self.assertEqual([sb.slave.slave for sb in self.b1.slaves], | |
327 [None, None]) | |
328 # we can mix and match latent slaves and normal slaves. ATM, they | |
329 # are treated identically in terms of selecting slaves. | |
330 d = self.connectSlave(builders=['b1'], slavename='bot3') | |
331 d.addCallback(self._testSequence_1) | |
332 return d | |
333 def _testSequence_1(self, res): | |
334 # now we have all three slaves. Two are latent slaves, and one is a | |
335 # standard slave. | |
336 slavenames = [sb.slave.slavename for sb in self.b1.slaves] | |
337 slavenames.sort() | |
338 self.assertEqual(slavenames, | |
339 ['bot1', 'bot2', 'bot3']) | |
340 # Now it's time to try a build on one of the latent slaves, | |
341 # substantiating it. | |
342 # since the current scheduling algorithm is simple and does not | |
343 # rotate or attempt any sort of load-balancing, two builds in | |
344 # sequence should both use the first slave. This may change later if | |
345 # we move to a more sophisticated scheme. | |
346 self.b1.CHOOSE_SLAVES_RANDOMLY = False | |
347 | |
348 self.build_deferred = self.doBuild("b1") | |
349 # now there's an event waiting for the slave to substantiate. | |
350 e = self.b1.builder_status.getEvent(-1) | |
351 self.assertEqual(e.text, ['substantiating']) | |
352 # the substantiation_deferred is an internal stash of a deferred | |
353 # that we'll grab so we can find the point at which the slave is | |
354 # substantiated but the build has not yet started. | |
355 d = self.bot1.substantiation_deferred | |
356 self.assertNotIdentical(d, None) | |
357 d.addCallback(self._testSequence_2) | |
358 return d | |
359 def _testSequence_2(self, res): | |
360 # bot 1 is substantiated. | |
361 self.assertNotIdentical(self.bot1.slave, None) | |
362 self.failUnless(self.bot1.substantiated) | |
363 # the event has announced its success | |
364 e = self.b1.builder_status.getEvent(-1) | |
365 self.assertEqual(e.text, ['substantiate', 'success']) | |
366 self.assertNotIdentical(e.finished, None) | |
367 # now we'll wait for the build to complete | |
368 d = self.build_deferred | |
369 del self.build_deferred | |
370 d.addCallback(self._testSequence_3) | |
371 return d | |
372 def _testSequence_3(self, res): | |
373 # build was a success! | |
374 self.failUnlessEqual(res.getResults(), SUCCESS) | |
375 self.failUnlessEqual(res.getSlavename(), "bot1") | |
376 # bot1 is substantiated now; bot2 is not. | |
377 self.failUnless(self.bot1.substantiated) | |
378 self.failIf(self.bot2.substantiated) | |
379 # bot1 is waiting a bit to see if there will be another build before | |
380 # it shuts down the instance ("insubstantiates") | |
381 self.build_wait_timer = self.bot1.build_wait_timer | |
382 self.assertNotIdentical(self.build_wait_timer, None) | |
383 self.failUnless(self.build_wait_timer.active()) | |
384 self.assertApproximates( | |
385 self.bot1.build_wait_timeout, | |
386 self.build_wait_timer.time - runtime.seconds(), | |
387 2) | |
388 # now we'll do another build | |
389 d = self.doBuild("b1") | |
390 # the slave is already substantiated, so no event is created | |
391 e = self.b1.builder_status.getEvent(-1) | |
392 self.assertNotEqual(e.text, ['substantiating']) | |
393 # wait for the next build | |
394 d.addCallback(self._testSequence_4) | |
395 return d | |
396 def _testSequence_4(self, res): | |
397 # build was a success! | |
398 self.failUnlessEqual(res.getResults(), SUCCESS) | |
399 self.failUnlessEqual(res.getSlavename(), "bot1") | |
400 # bot1 is still waiting, but with a new timer | |
401 self.assertNotIdentical(self.bot1.build_wait_timer, None) | |
402 self.assertNotIdentical(self.build_wait_timer, | |
403 self.bot1.build_wait_timer) | |
404 self.assertApproximates( | |
405 self.bot1.build_wait_timeout, | |
406 self.bot1.build_wait_timer.time - runtime.seconds(), | |
407 2) | |
408 del self.build_wait_timer | |
409 # We'll set the timer to fire sooner, and wait for it to fire. | |
410 self.bot1.build_wait_timer.reset(0) | |
411 d = defer.Deferred() | |
412 reactor.callLater(1, d.callback, None) | |
413 d.addCallback(self._testSequence_5) | |
414 return d | |
415 def _testSequence_5(self, res): | |
416 # slave is insubstantiated | |
417 self.assertIdentical(self.bot1.slave, None) | |
418 self.failIf(self.bot1.substantiated) | |
419 # Now we'll start up another build, to show that the shutdown left | |
420 # things in such a state that we can restart. | |
421 d = self.doBuild("b1") | |
422 # the bot can return an informative message on success that the event | |
423 # will render. Let's use a mechanism of our test latent bot to | |
424 # demonstrate that. | |
425 self.bot1.start_message = ['[instance id]', '[start-up time]'] | |
426 # here's our event again: | |
427 self.e = self.b1.builder_status.getEvent(-1) | |
428 self.assertEqual(self.e.text, ['substantiating']) | |
429 d.addCallback(self._testSequence_6) | |
430 return d | |
431 def _testSequence_6(self, res): | |
432 # build was a success! | |
433 self.failUnlessEqual(res.getResults(), SUCCESS) | |
434 self.failUnlessEqual(res.getSlavename(), "bot1") | |
435 # the event has announced its success. (Just imagine that | |
436 # [instance id] and [start-up time] were actually valuable | |
437 # information.) | |
438 e = self.e | |
439 del self.e | |
440 self.assertEqual( | |
441 e.text, | |
442 ['substantiate', 'success', '[instance id]', '[start-up time]']) | |
443 # Now we need to clean up the timer. We could just cancel it, but | |
444 # we'll go through the full dance one more time to show we can. | |
445 # We'll set the timer to fire sooner, and wait for it to fire. | |
446 # Also, we'll set the build_slave to take a little bit longer to shut | |
447 # down, to see that it doesn't affect anything. | |
448 self.bot1.stop_wait = 2 | |
449 self.bot1.build_wait_timer.reset(0) | |
450 d = defer.Deferred() | |
451 reactor.callLater(1, d.callback, None) | |
452 d.addCallback(self._testSequence_7) | |
453 return d | |
454 def _testSequence_7(self, res): | |
455 # slave is insubstantiated | |
456 self.assertIdentical(self.bot1.slave, None) | |
457 self.assertNot(self.bot1.substantiated) | |
458 # the remote is still not cleaned out. We'll wait for it. | |
459 d = defer.Deferred() | |
460 reactor.callLater(1, d.callback, None) | |
461 return d | |
462 | |
463 def testNeverSubstantiated(self): | |
464 # When a substantiation is requested, the slave may never appear. | |
465 # This is a serious problem, and recovering from it is not really | |
466 # handled well right now (in part because a way to handle it is not | |
467 # clear). However, at the least, the status event will show a | |
468 # failure, and the slave will be told to insubstantiate, and to be | |
469 # removed from the botmaster as an available slave. | |
470 # This tells our test bot to never start, and to not complain about | |
471 # being told to stop without ever starting | |
472 self.bot1.testing_substantiation_timeout = True | |
473 # normally (by default) we have 20 minutes to try and connect to the | |
474 # remote | |
475 self.assertEqual(self.bot1.missing_timeout, 20*60) | |
476 # for testing purposes, we'll put that down to a tenth of a second! | |
477 self.bot1.missing_timeout = 0.1 | |
478 # since the current scheduling algorithm is simple and does not | |
479 # rotate or attempt any sort of load-balancing, two builds in | |
480 # sequence should both use the first slave. This may change later if | |
481 # we move to a more sophisticated scheme. | |
482 self.b1.CHOOSE_SLAVES_RANDOMLY = False | |
483 # start a build | |
484 self.build_deferred = self.doBuild('b1') | |
485 # the event tells us we are substantiating, as usual | |
486 e = self.b1.builder_status.getEvent(-1) | |
487 self.assertEqual(e.text, ['substantiating']) | |
488 # in a moment we'll see that the test flag recording that the bot was | |
489 # told to insubstantiate has been set. Here, we just verify that it | |
490 # has not been set yet. | |
491 self.failIf(self.bot1.stopped) | |
492 # That substantiation is going to fail. Let's wait for it. | |
493 d = self.bot1.substantiation_deferred | |
494 self.assertNotIdentical(d, None) | |
495 d.addCallbacks(self._testNeverSubstantiated_BadSuccess, | |
496 self._testNeverSubstantiated_1) | |
497 return d | |
498 def _testNeverSubstantiated_BadSuccess(self, res): | |
499 self.fail('we should not have succeeded here.') | |
500 def _testNeverSubstantiated_1(self, res): | |
501 # ok, we failed. | |
502 self.assertIdentical(self.bot1.slave, None) | |
503 self.failIf(self.bot1.substantiated) | |
504 self.failUnless(isinstance(res, failure.Failure)) | |
505 self.assertIdentical(self.bot1.substantiation_deferred, None) | |
506 # our event informs us of this | |
507 e1 = self.b1.builder_status.getEvent(-3) | |
508 self.assertEqual(e1.text, ['substantiate', 'failed']) | |
509 self.assertNotIdentical(e1.finished, None) | |
510 # the slave is no longer available to build. The events show it... | |
511 e2 = self.b1.builder_status.getEvent(-2) | |
512 self.assertEqual(e2.text, ['removing', 'latent', 'bot1']) | |
513 e3 = self.b1.builder_status.getEvent(-1) | |
514 self.assertEqual(e3.text, ['disconnect', 'bot1']) | |
515 # ...and the builder shows it. | |
516 self.assertEqual(['bot2'], | |
517 [sb.slave.slavename for sb in self.b1.slaves]) | |
518 # ideally, we would retry the build, but that infrastructure (which | |
519 # would be used for other situations in the builder as well) does not | |
520 # yet exist. Therefore the build never completes one way or the | |
521 # other, just as if a normal slave detached. | |
522 | |
523 def testServiceStop(self): | |
524 # if the slave has an instance when it is stopped, the slave should | |
525 # be told to shut down. | |
526 self.b1.CHOOSE_SLAVES_RANDOMLY = False | |
527 d = self.doBuild("b1") | |
528 d.addCallback(self._testServiceStop_1) | |
529 return d | |
530 def _testServiceStop_1(self, res): | |
531 # build was a success! | |
532 self.failUnlessEqual(res.getResults(), SUCCESS) | |
533 self.failUnlessEqual(res.getSlavename(), "bot1") | |
534 # bot 1 is substantiated. | |
535 self.assertNotIdentical(self.bot1.slave, None) | |
536 self.failUnless(self.bot1.substantiated) | |
537 # now let's stop the bot. | |
538 d = self.bot1.stopService() | |
539 d.addCallback(self._testServiceStop_2) | |
540 return d | |
541 def _testServiceStop_2(self, res): | |
542 # bot 1 is NOT substantiated. | |
543 self.assertIdentical(self.bot1.slave, None) | |
544 self.failIf(self.bot1.substantiated) | |
545 | |
546 def testPing(self): | |
547 # While a latent slave pings normally when it is substantiated (as | |
548 # happens behind the scenes when a build is requested), when | |
549 # it is insubstantiated, the ping is a no-op success. | |
550 self.assertIdentical(self.bot1.slave, None) | |
551 self.failIf(self.bot1.substantiated) | |
552 d = self.connectSlave(builders=['b1'], slavename='bot3') | |
553 d.addCallback(self._testPing_1) | |
554 return d | |
555 def _testPing_1(self, res): | |
556 slavenames = [sb.slave.slavename for sb in self.b1.slaves] | |
557 slavenames.sort() | |
558 self.assertEqual(slavenames, | |
559 ['bot1', 'bot2', 'bot3']) | |
560 d = self.control.getBuilder('b1').ping() | |
561 d.addCallback(self._testPing_2) | |
562 return d | |
563 def _testPing_2(self, res): | |
564 # all three pings were successful | |
565 self.assert_(res) | |
566 # but neither bot1 nor bot2 was substantiated. | |
567 self.assertIdentical(self.bot1.slave, None) | |
568 self.failIf(self.bot1.substantiated) | |
569 self.assertIdentical(self.bot2.slave, None) | |
570 self.failIf(self.bot2.substantiated) | |
571 | |
572 | |
573 class SlaveBusyness(RunMixin, unittest.TestCase): | |
574 | |
575 def setUp(self): | |
576 RunMixin.setUp(self) | |
577 self.master.loadConfig(config_busyness) | |
578 self.master.startService() | |
579 d = self.connectSlave(["b1", "b2"]) | |
580 return d | |
581 | |
582 def doBuild(self, buildername): | |
583 br = BuildRequest("forced", SourceStamp(), 'test_builder') | |
584 d = br.waitUntilFinished() | |
585 self.control.getBuilder(buildername).requestBuild(br) | |
586 return d | |
587 | |
588 def getRunningBuilds(self): | |
589 return len(self.status.getSlave("bot1").getRunningBuilds()) | |
590 | |
591 def testSlaveNotBusy(self): | |
592 self.failUnlessEqual(self.getRunningBuilds(), 0) | |
593 # now kick a build, wait for it to finish, then check again | |
594 d = self.doBuild("b1") | |
595 d.addCallback(self._testSlaveNotBusy_1) | |
596 return d | |
597 | |
598 def _testSlaveNotBusy_1(self, res): | |
599 self.failUnlessEqual(self.getRunningBuilds(), 0) | |
600 | |
601 def testSlaveBusyOneBuild(self): | |
602 d1 = self.doBuild("b1") | |
603 d2 = defer.Deferred() | |
604 reactor.callLater(.5, d2.callback, None) | |
605 d2.addCallback(self._testSlaveBusyOneBuild_1) | |
606 d1.addCallback(self._testSlaveBusyOneBuild_finished_1) | |
607 return defer.DeferredList([d1,d2]) | |
608 | |
609 def _testSlaveBusyOneBuild_1(self, res): | |
610 self.failUnlessEqual(self.getRunningBuilds(), 1) | |
611 | |
612 def _testSlaveBusyOneBuild_finished_1(self, res): | |
613 self.failUnlessEqual(self.getRunningBuilds(), 0) | |
614 | |
615 def testSlaveBusyTwoBuilds(self): | |
616 d1 = self.doBuild("b1") | |
617 d2 = self.doBuild("b2") | |
618 d3 = defer.Deferred() | |
619 reactor.callLater(.5, d3.callback, None) | |
620 d3.addCallback(self._testSlaveBusyTwoBuilds_1) | |
621 d1.addCallback(self._testSlaveBusyTwoBuilds_finished_1, d2) | |
622 return defer.DeferredList([d1,d3]) | |
623 | |
624 def _testSlaveBusyTwoBuilds_1(self, res): | |
625 self.failUnlessEqual(self.getRunningBuilds(), 2) | |
626 | |
627 def _testSlaveBusyTwoBuilds_finished_1(self, res, d2): | |
628 self.failUnlessEqual(self.getRunningBuilds(), 1) | |
629 d2.addCallback(self._testSlaveBusyTwoBuilds_finished_2) | |
630 return d2 | |
631 | |
632 def _testSlaveBusyTwoBuilds_finished_2(self, res): | |
633 self.failUnlessEqual(self.getRunningBuilds(), 0) | |
634 | |
635 def testSlaveDisconnect(self): | |
636 d1 = self.doBuild("b1") | |
637 d2 = defer.Deferred() | |
638 reactor.callLater(.5, d2.callback, None) | |
639 d2.addCallback(self._testSlaveDisconnect_1) | |
640 d1.addCallback(self._testSlaveDisconnect_finished_1) | |
641 return defer.DeferredList([d1, d2]) | |
642 | |
643 def _testSlaveDisconnect_1(self, res): | |
644 self.failUnlessEqual(self.getRunningBuilds(), 1) | |
645 return self.shutdownAllSlaves() | |
646 | |
647 def _testSlaveDisconnect_finished_1(self, res): | |
648 self.failUnlessEqual(self.getRunningBuilds(), 0) | |
649 | |
650 config_3 = """ | |
651 from buildbot.process import factory | |
652 from buildbot.steps import dummy | |
653 from buildbot.buildslave import BuildSlave | |
654 from buildbot.config import BuilderConfig | |
655 s = factory.s | |
656 | |
657 BuildmasterConfig = c = {} | |
658 c['slaves'] = [BuildSlave('bot1', 'sekrit')] | |
659 c['schedulers'] = [] | |
660 c['slavePortnum'] = 0 | |
661 c['schedulers'] = [] | |
662 | |
663 f1 = factory.BuildFactory([s(dummy.Wait, handle='one')]) | |
664 f2 = factory.BuildFactory([s(dummy.Wait, handle='two')]) | |
665 f3 = factory.BuildFactory([s(dummy.Wait, handle='three')]) | |
666 | |
667 c['builders'] = [ | |
668 BuilderConfig(name='b1', slavename='bot1', factory=f1), | |
669 ] | |
670 """ | |
671 | |
672 config_4 = config_3 + """ | |
673 c['builders'] = [ | |
674 BuilderConfig(name='b1', slavename='bot1', factory=f2), | |
675 ] | |
676 """ | |
677 | |
678 config_5 = config_3 + """ | |
679 c['builders'] = [ | |
680 BuilderConfig(name='b1', slavename='bot1', factory=f3), | |
681 ] | |
682 """ | |
683 | |
684 from buildbot.slave.commands import waitCommandRegistry | |
685 | |
686 class Reconfig(RunMixin, unittest.TestCase): | |
687 | |
688 def setUp(self): | |
689 RunMixin.setUp(self) | |
690 self.master.loadConfig(config_3) | |
691 self.master.startService() | |
692 d = self.connectSlave(["b1"]) | |
693 return d | |
694 | |
695 def _one_started(self): | |
696 log.msg("testReconfig._one_started") | |
697 self.build1_started = True | |
698 self.d1.callback(None) | |
699 return self.d2 | |
700 | |
701 def _two_started(self): | |
702 log.msg("testReconfig._two_started") | |
703 self.build2_started = True | |
704 self.d3.callback(None) | |
705 return self.d4 | |
706 | |
707 def _three_started(self): | |
708 log.msg("testReconfig._three_started") | |
709 self.build3_started = True | |
710 self.d5.callback(None) | |
711 return self.d6 | |
712 | |
713 def testReconfig(self): | |
714 # reconfiguring a Builder should not interrupt any running Builds. No | |
715 # queued BuildRequests should be lost. The next Build started should | |
716 # use the new process. | |
717 slave1 = self.slaves['bot1'] | |
718 bot1 = slave1.getServiceNamed('bot') | |
719 sb1 = bot1.builders['b1'] | |
720 self.failUnless(isinstance(sb1, bot.SlaveBuilder)) | |
721 self.failUnless(sb1.running) | |
722 b1 = self.master.botmaster.builders['b1'] | |
723 self.orig_b1 = b1 | |
724 | |
725 self.d1 = d1 = defer.Deferred() | |
726 self.d2 = d2 = defer.Deferred() | |
727 self.d3, self.d4 = defer.Deferred(), defer.Deferred() | |
728 self.d5, self.d6 = defer.Deferred(), defer.Deferred() | |
729 self.build1_started = False | |
730 self.build2_started = False | |
731 self.build3_started = False | |
732 waitCommandRegistry[("one","build1")] = self._one_started | |
733 waitCommandRegistry[("two","build2")] = self._two_started | |
734 waitCommandRegistry[("three","build3")] = self._three_started | |
735 | |
736 # use different branches to make sure these cannot be merged | |
737 br1 = BuildRequest("build1", SourceStamp(branch="1"), 'test_builder') | |
738 b1.submitBuildRequest(br1) | |
739 br2 = BuildRequest("build2", SourceStamp(branch="2"), 'test_builder') | |
740 b1.submitBuildRequest(br2) | |
741 br3 = BuildRequest("build3", SourceStamp(branch="3"), 'test_builder') | |
742 b1.submitBuildRequest(br3) | |
743 self.requests = (br1, br2, br3) | |
744 # all three are now in the queue | |
745 | |
746 # wait until the first one has started | |
747 d1.addCallback(self._testReconfig_2) | |
748 return d1 | |
749 | |
750 def _testReconfig_2(self, res): | |
751 log.msg("_testReconfig_2") | |
752 # confirm that it is building | |
753 brs = self.requests[0].status.getBuilds() | |
754 self.failUnlessEqual(len(brs), 1) | |
755 self.build1 = brs[0] | |
756 self.failUnlessEqual(self.build1.getCurrentStep().getName(), "wait") | |
757 # br1 is building, br2 and br3 are in the queue (in that order). Now | |
758 # we reconfigure the Builder. | |
759 self.failUnless(self.build1_started) | |
760 d = self.master.loadConfig(config_4) | |
761 d.addCallback(self._testReconfig_3) | |
762 return d | |
763 | |
764 def _testReconfig_3(self, res): | |
765 log.msg("_testReconfig_3") | |
766 # now check to see that br1 is still building, and that br2 and br3 | |
767 # are in the queue of the new builder | |
768 b1 = self.master.botmaster.builders['b1'] | |
769 self.failIfIdentical(b1, self.orig_b1) | |
770 self.failIf(self.build1.isFinished()) | |
771 self.failUnlessEqual(self.build1.getCurrentStep().getName(), "wait") | |
772 self.failUnlessEqual(len(b1.buildable), 2) | |
773 self.failUnless(self.requests[1] in b1.buildable) | |
774 self.failUnless(self.requests[2] in b1.buildable) | |
775 | |
776 # allow br1 to finish, and make sure its status is delivered normally | |
777 d = self.requests[0].waitUntilFinished() | |
778 d.addCallback(self._testReconfig_4) | |
779 self.d2.callback(None) | |
780 return d | |
781 | |
782 def _testReconfig_4(self, bs): | |
783 log.msg("_testReconfig_4") | |
784 self.failUnlessEqual(bs.getReason(), "build1") | |
785 self.failUnless(bs.isFinished()) | |
786 self.failUnlessEqual(bs.getResults(), SUCCESS) | |
787 | |
788 # at this point, the first build has finished, and there is a pending | |
789 # call to start the second build. Once that pending call fires, there | |
790 # is a network roundtrip before the 'wait' RemoteCommand is delivered | |
791 # to the slave. We need to wait for both events to happen before we | |
792 # can check to make sure it is using the correct process. Just wait a | |
793 # full second. | |
794 d = defer.Deferred() | |
795 d.addCallback(self._testReconfig_5) | |
796 reactor.callLater(1, d.callback, None) | |
797 return d | |
798 | |
799 def _testReconfig_5(self, res): | |
800 log.msg("_testReconfig_5") | |
801 # at this point the next build ought to be running | |
802 b1 = self.master.botmaster.builders['b1'] | |
803 self.failUnlessEqual(len(b1.buildable), 1) | |
804 self.failUnless(self.requests[2] in b1.buildable) | |
805 self.failUnlessEqual(len(b1.building), 1) | |
806 # and it ought to be using the new process | |
807 self.failUnless(self.build2_started) | |
808 | |
809 # now, while the second build is running, change the config multiple | |
810 # times. | |
811 | |
812 d = self.master.loadConfig(config_3) | |
813 d.addCallback(lambda res: self.master.loadConfig(config_4)) | |
814 d.addCallback(lambda res: self.master.loadConfig(config_5)) | |
815 def _done(res): | |
816 # then once that's done, allow the second build to finish and | |
817 # wait for it to complete | |
818 da = self.requests[1].waitUntilFinished() | |
819 self.d4.callback(None) | |
820 return da | |
821 d.addCallback(_done) | |
822 def _done2(res): | |
823 # and once *that*'s done, wait another second to let the third | |
824 # build start | |
825 db = defer.Deferred() | |
826 reactor.callLater(1, db.callback, None) | |
827 return db | |
828 d.addCallback(_done2) | |
829 d.addCallback(self._testReconfig_6) | |
830 return d | |
831 | |
832 def _testReconfig_6(self, res): | |
833 log.msg("_testReconfig_6") | |
834 # now check to see that the third build is running | |
835 self.failUnless(self.build3_started) | |
836 | |
837 # we're done | |
838 | |
839 | |
840 | |
841 class Slave2(RunMixin, unittest.TestCase): | |
842 | |
843 revision = 0 | |
844 | |
845 def setUp(self): | |
846 RunMixin.setUp(self) | |
847 self.master.loadConfig(config_1) | |
848 self.master.startService() | |
849 | |
850 def doBuild(self, buildername, reason="forced"): | |
851 # we need to prevent these builds from being merged, so we create | |
852 # each of them with a different revision specifier. The revision is | |
853 # ignored because our build process does not have a source checkout | |
854 # step. | |
855 self.revision += 1 | |
856 br = BuildRequest(reason, SourceStamp(revision=self.revision), | |
857 'test_builder') | |
858 d = br.waitUntilFinished() | |
859 self.control.getBuilder(buildername).requestBuild(br) | |
860 return d | |
861 | |
862 def testFirstComeFirstServed(self): | |
863 # submit three builds, then connect a slave which fails the | |
864 # slaveping. The first build will claim the slave, do the slaveping, | |
865 # give up, and re-queue the build. Verify that the build gets | |
866 # re-queued in front of all other builds. This may be tricky, because | |
867 # the other builds may attempt to claim the just-failed slave. | |
868 | |
869 d1 = self.doBuild("b1", "first") | |
870 d2 = self.doBuild("b1", "second") | |
871 #buildable = self.master.botmaster.builders["b1"].buildable | |
872 #print [b.reason for b in buildable] | |
873 | |
874 # specifically, I want the poor build to get precedence over any | |
875 # others that were waiting. To test this, we need more builds than | |
876 # slaves. | |
877 | |
878 # now connect a broken slave. The first build starts as soon as it | |
879 # connects, so by the time we get to our _1 method, the ill-fated | |
880 # build has already started. | |
881 d = self.connectSlave(["b1"], opts={"failPingOnce": True}) | |
882 d.addCallback(self._testFirstComeFirstServed_1, d1, d2) | |
883 return d | |
884 def _testFirstComeFirstServed_1(self, res, d1, d2): | |
885 # the master has sent the slaveping. When this is received, it will | |
886 # fail, causing the master to hang up on the slave. When it | |
887 # reconnects, it should find the first build at the front of the | |
888 # queue. If we simply wait for both builds to complete, then look at | |
889 # the status logs, we should see that the builds ran in the correct | |
890 # order. | |
891 | |
892 d = defer.DeferredList([d1,d2]) | |
893 d.addCallback(self._testFirstComeFirstServed_2) | |
894 return d | |
895 def _testFirstComeFirstServed_2(self, res): | |
896 b = self.status.getBuilder("b1") | |
897 builds = b.getBuild(0), b.getBuild(1) | |
898 reasons = [build.getReason() for build in builds] | |
899 self.failUnlessEqual(reasons, ["first", "second"]) | |
900 | |
901 config_multi_builders = config_1 + """ | |
902 c['builders'] = [ | |
903 BuilderConfig(name='dummy', factory=f2, | |
904 slavenames=['bot1', 'bot2', 'bot3']), | |
905 BuilderConfig(name='dummy2', factory=f2, | |
906 slavenames=['bot1', 'bot2', 'bot3']), | |
907 BuilderConfig(name='dummy3', factory=f2, | |
908 slavenames=['bot1', 'bot2', 'bot3']), | |
909 ] | |
910 """ | |
911 | |
912 config_mail_missing = config_1 + """ | |
913 c['slaves'] = [BuildSlave('bot1', 'sekrit', notify_on_missing='admin', | |
914 missing_timeout=1)] | |
915 c['builders'] = [ | |
916 BuilderConfig(name='dummy', slavename='bot1', factory=f1), | |
917 ] | |
918 c['projectName'] = 'myproject' | |
919 c['projectURL'] = 'myURL' | |
920 """ | |
921 | |
922 class FakeMailer(mail.MailNotifier): | |
923 def sendMessage(self, m, recipients): | |
924 self.messages.append((m,recipients)) | |
925 return defer.succeed(None) | |
926 | |
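FakeMailer only captures what a real MailNotifier would send; test_mail_on_missing below relies on the master finding a MailNotifier among its status targets when a slave's missing timer fires. In a real master.cfg the pieces being exercised fit together roughly as in this sketch, which reuses the 0.7.x config keys from the config strings above (addresses are placeholders, and builders/schedulers/slavePortnum are omitted):

```python
from buildbot.buildslave import BuildSlave
from buildbot.status import mail

BuildmasterConfig = c = {}

# the slave owns the "missing" notification: if it stays disconnected for
# longer than missing_timeout seconds, mail is sent to notify_on_missing
c['slaves'] = [BuildSlave('bot1', 'sekrit',
                          notify_on_missing='admin@example.org',
                          missing_timeout=10*60)]

# a MailNotifier status target supplies the mail-sending machinery; the
# test below checks that its fromaddr ends up in the From: header of the
# missing-slave message
c['status'] = [mail.MailNotifier(fromaddr='buildbot@example.org')]

c['projectName'] = 'myproject'
c['projectURL'] = 'http://example.org/'
```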
927 class BuildSlave(RunMixin, unittest.TestCase): | |
928 def test_track_builders(self): | |
929 self.master.loadConfig(config_multi_builders) | |
930 self.master.readConfig = True | |
931 self.master.startService() | |
932 d = self.connectSlave() | |
933 | |
934 def _check(res): | |
935 b = self.master.botmaster.builders['dummy'] | |
936 self.failUnless(len(b.slaves) == 1) # just bot1 | |
937 | |
938 bs = b.slaves[0].slave | |
939 self.failUnless(len(bs.slavebuilders) == 3) | |
940 self.failUnless(b in [sb.builder for sb in | |
941 bs.slavebuilders.values()]) | |
942 | |
943 d.addCallback(_check) | |
944 return d | |
945 | |
946 def test_mail_on_missing(self): | |
947 self.master.loadConfig(config_mail_missing) | |
948 self.master.readConfig = True | |
949 self.master.startService() | |
950 fm = FakeMailer("buildbot@example.org") | |
951 fm.messages = [] | |
952 fm.setServiceParent(self.master) | |
953 self.master.statusTargets.append(fm) | |
954 | |
955 d = self.connectSlave() | |
956 d.addCallback(self.stall, 1) | |
957 d.addCallback(lambda res: self.shutdownSlave("bot1", "dummy")) | |
958 def _not_yet(res): | |
959 self.failIf(fm.messages) | |
960 d.addCallback(_not_yet) | |
961 # we reconnect right away, so the timer shouldn't fire | |
962 d.addCallback(lambda res: self.connectSlave()) | |
963 d.addCallback(self.stall, 3) | |
964 d.addCallback(_not_yet) | |
965 d.addCallback(lambda res: self.shutdownSlave("bot1", "dummy")) | |
966 d.addCallback(_not_yet) | |
967 # now we let it sit disconnected for long enough for the timer to | |
968 # fire | |
969 d.addCallback(self.stall, 3) | |
970 def _check(res): | |
971 self.failUnlessEqual(len(fm.messages), 1) | |
972 msg,recips = fm.messages[0] | |
973 self.failUnlessEqual(recips, ["admin"]) | |
974 body = msg.as_string() | |
975 self.failUnlessIn("To: admin", body) | |
976 self.failUnlessIn("Subject: Buildbot: buildslave bot1 was lost", | |
977 body) | |
978 self.failUnlessIn("From: buildbot@example.org", body) | |
979 self.failUnlessIn("working for 'myproject'", body) | |
980 self.failUnlessIn("has noticed that the buildslave named bot1 went away", | |
981 body) | |
982 self.failUnlessIn("was 'one'", body) | |
983 self.failUnlessIn("myURL", body) | |
984 d.addCallback(_check) | |
985 return d | |
986 | |
987 def stall(self, result, delay=1): | |
988 d = defer.Deferred() | |
989 reactor.callLater(delay, d.callback, result) | |
990 return d | |