Package SCons :: Module Taskmaster

Source Code for Module SCons.Taskmaster

  1  # 
  2  # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 The SCons Foundation 
  3  # 
  4  # Permission is hereby granted, free of charge, to any person obtaining 
  5  # a copy of this software and associated documentation files (the 
  6  # "Software"), to deal in the Software without restriction, including 
  7  # without limitation the rights to use, copy, modify, merge, publish, 
  8  # distribute, sublicense, and/or sell copies of the Software, and to 
  9  # permit persons to whom the Software is furnished to do so, subject to 
 10  # the following conditions: 
 11  # 
 12  # The above copyright notice and this permission notice shall be included 
 13  # in all copies or substantial portions of the Software. 
 14  # 
 15  # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY 
 16  # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE 
 17  # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 
 18  # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE 
 19  # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 
 20  # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION 
 21  # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
 22  # 
 23   
 24  __doc__ = """ 
 25  Generic Taskmaster module for the SCons build engine. 
 26   
 27  This module contains the primary interface(s) between a wrapping user 
 28  interface and the SCons build engine.  There are two key classes here: 
 29   
 30      Taskmaster 
 31          This is the main engine for walking the dependency graph and 
 32          calling things to decide what does or doesn't need to be built. 
 33   
 34      Task 
 35          This is the base class for allowing a wrapping interface to 
 36          decide what does or doesn't actually need to be done.  The 
 37          intention is for a wrapping interface to subclass this as 
 38          appropriate for different types of behavior it may need. 
 39   
 40          The canonical example is the SCons native Python interface, 
 41          which has Task subclasses that handle its specific behavior, 
 42          like printing "`foo' is up to date" when a top-level target 
 43          doesn't need to be built, and handling the -c option by removing 
 44          targets as its "build" action.  There is also a separate subclass 
 45          for suppressing this output when the -q option is used. 
 46   
 47          The Taskmaster instantiates a Task object for each (set of) 
 48          target(s) that it decides need to be evaluated and/or built. 
 49  """ 
 50   
 51  __revision__ = "src/engine/SCons/Taskmaster.py 3842 2008/12/20 22:59:52 scons" 
 52   
 53  from itertools import chain 
 54  import operator 
 55  import string 
 56  import sys 
 57  import traceback 
 58   
 59  import SCons.Errors 
 60  import SCons.Node 
 61   
 62  StateString = SCons.Node.StateString 
 63  NODE_NO_STATE = SCons.Node.no_state 
 64  NODE_PENDING = SCons.Node.pending 
 65  NODE_EXECUTING = SCons.Node.executing 
 66  NODE_UP_TO_DATE = SCons.Node.up_to_date 
 67  NODE_EXECUTED = SCons.Node.executed 
 68  NODE_FAILED = SCons.Node.failed 
 69   
 70   
 71  # A subsystem for recording stats about how different Nodes are handled by 
 72  # the main Taskmaster loop.  There's no external control here (no need for 
 73  # a --debug= option); enable it by changing the value of CollectStats. 
 74   
 75  CollectStats = None 
 76   
 77  class Stats:
 78      """
 79      A simple class for holding statistics about the disposition of a
 80      Node by the Taskmaster.  If we're collecting statistics, each Node
 81      processed by the Taskmaster gets one of these attached, in which case
 82      the Taskmaster records its decision each time it processes the Node.
 83      (Ideally, that's just once per Node.)
 84      """
 85      def __init__(self):
 86          """
 87          Instantiates a Taskmaster.Stats object, initializing all
 88          appropriate counters to zero.
 89          """
 90          self.considered = 0
 91          self.already_handled = 0
 92          self.problem = 0
 93          self.child_failed = 0
 94          self.not_built = 0
 95          self.side_effects = 0
 96          self.build = 0
 97  
 98  StatsNodes = []
 99  
100  fmt = "%(considered)3d "\
101        "%(already_handled)3d " \
102        "%(problem)3d " \
103        "%(child_failed)3d " \
104        "%(not_built)3d " \
105        "%(side_effects)3d " \
106        "%(build)3d "
107  
108  def dump_stats():
109      StatsNodes.sort(lambda a, b: cmp(str(a), str(b)))
110      for n in StatsNodes:
111          print (fmt % n.stats.__dict__) + str(n)
112  
113  
114  
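As the comment above notes, there is no command-line switch for this subsystem; a small sketch of how a driver script could enable it (the surrounding build-script context is assumed, not shown):

    import SCons.Taskmaster

    SCons.Taskmaster.CollectStats = 1     # any true value turns collection on

    # ... run the build; every Node the Taskmaster considers now carries a
    # .stats attribute recording how it was dispositioned ...

    SCons.Taskmaster.dump_stats()         # one line per Node, columns as in 'fmt' above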
115  class Task:
116      """
117      Default SCons build engine task.
118  
119      This controls the interaction of the actual building of a node
120      and the rest of the engine.
121  
122      This is expected to handle all of the normally-customizable
123      aspects of controlling a build, so any given application
124      *should* be able to do what it wants by sub-classing this
125      class and overriding methods as appropriate.  If an application
126      needs to customize something by sub-classing Taskmaster (or
127      some other build engine class), we should first try to migrate
128      that functionality into this class.
129  
130      Note that it's generally a good idea for sub-classes to call
131      these methods explicitly to update state, etc., rather than
132      roll their own interaction with Taskmaster from scratch.
133      """
134      def __init__(self, tm, targets, top, node):
135          self.tm = tm
136          self.targets = targets
137          self.top = top
138          self.node = node
139          self.exc_clear()
140
141      def trace_message(self, method, node, description='node'):
142          fmt = '%-20s %s %s\n'
143          return fmt % (method + ':', description, self.tm.trace_node(node))
144
145      def display(self, message):
146          """
147          Hook to allow the calling interface to display a message.
148  
149          This hook gets called as part of preparing a task for execution
150          (that is, a Node to be built).  As part of figuring out what Node
151          should be built next, the actual target list may be altered,
152          along with a message describing the alteration.  The calling
153          interface can subclass Task and provide a concrete implementation
154          of this method to see those messages.
155          """
156          pass
157
158      def prepare(self):
159          """
160          Called just before the task is executed.
161  
162          This is mainly intended to give the target Nodes a chance to
163          unlink underlying files and make all necessary directories before
164          the Action is actually called to build the targets.
165          """
166          T = self.tm.trace
167          if T: T.write(self.trace_message('Task.prepare()', self.node))
168  
169          # Now that it's the appropriate time, give the TaskMaster a
170          # chance to raise any exceptions it encountered while preparing
171          # this task.
172          self.exception_raise()
173  
174          if self.tm.message:
175              self.display(self.tm.message)
176              self.tm.message = None
177  
178          # Let the targets take care of any necessary preparations.
179          # This includes verifying that all of the necessary sources
180          # and dependencies exist, removing the target file(s), etc.
181          #
182          # As of April 2008, the get_executor().prepare() method makes
183          # sure that all of the aggregate sources necessary to build this
184          # Task's target(s) exist in one up-front check.  The individual
185          # target t.prepare() methods check that each target's explicit
186          # or implicit dependencies exist, and also initialize the
187          # .sconsign info.
188          self.targets[0].get_executor().prepare()
189          for t in self.targets:
190              t.prepare()
191              for s in t.side_effects:
192                  s.prepare()
193
194      def get_target(self):
195          """Fetch the target being built or updated by this task.
196          """
197          return self.node
198
199      def needs_execute(self):
200          """
201          Called to determine whether the task's execute() method should
202          be run.
203  
204          This method allows one to skip the somewhat costly execution
205          of the execute() method in a separate thread.  For example,
206          that would be unnecessary for up-to-date targets.
207          """
208          return True
209
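display() and needs_execute() are the usual customization hooks for a wrapping interface. A hypothetical subclass that surfaces Taskmaster messages and skips execute() for targets that make_ready() already found up to date might look roughly like this (illustrative only, not SCons' actual command-line Task classes):

    class VerboseTask(Task):
        def display(self, message):
            # Show target-list alterations reported by the Taskmaster.
            sys.stderr.write('scons: %s\n' % message)

        def needs_execute(self):
            # make_ready_current() leaves up-to-date targets in
            # NODE_UP_TO_DATE, so only EXECUTING targets need execute().
            return self.targets[0].get_state() == NODE_EXECUTING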
210      def execute(self):
211          """
212          Called to execute the task.
213  
214          This method is called from multiple threads in a parallel build,
215          so only do thread safe stuff here.  Do thread unsafe stuff in
216          prepare(), executed() or failed().
217          """
218          T = self.tm.trace
219          if T: T.write(self.trace_message('Task.execute()', self.node))
220  
221          try:
222              everything_was_cached = 1
223              for t in self.targets:
224                  if not t.retrieve_from_cache():
225                      everything_was_cached = 0
226                      break
227              if not everything_was_cached:
228                  self.targets[0].build()
229          except SystemExit:
230              exc_value = sys.exc_info()[1]
231              raise SCons.Errors.ExplicitExit(self.targets[0], exc_value.code)
232          except SCons.Errors.UserError:
233              raise
234          except SCons.Errors.BuildError:
235              raise
236          except Exception, e:
237              buildError = SCons.Errors.convert_to_BuildError(e)
238              buildError.node = self.targets[0]
239              buildError.exc_info = sys.exc_info()
240              raise buildError
241  
242      def executed_without_callbacks(self):
243          """
244          Called when the task has been successfully executed
245          and the Taskmaster instance doesn't want to call
246          the Node's callback methods.
247          """
248          T = self.tm.trace
249          if T: T.write(self.trace_message('Task.executed_without_callbacks()',
250                                           self.node))
251  
252          for t in self.targets:
253              if t.get_state() == NODE_EXECUTING:
254                  for side_effect in t.side_effects:
255                      side_effect.set_state(NODE_NO_STATE)
256                  t.set_state(NODE_EXECUTED)
257
258      def executed_with_callbacks(self):
259          """
260          Called when the task has been successfully executed and
261          the Taskmaster instance wants to call the Node's callback
262          methods.
263  
264          This may have been a do-nothing operation (to preserve build
265          order), so we must check the node's state before deciding whether
266          it was "built", in which case we call the appropriate Node method.
267          In any event, we always call "visited()", which will handle any
268          post-visit actions that must take place regardless of whether
269          or not the target was an actual built target or a source Node.
270          """
271          T = self.tm.trace
272          if T: T.write(self.trace_message('Task.executed_with_callbacks()',
273                                           self.node))
274  
275          for t in self.targets:
276              if t.get_state() == NODE_EXECUTING:
277                  for side_effect in t.side_effects:
278                      side_effect.set_state(NODE_NO_STATE)
279                  t.set_state(NODE_EXECUTED)
280                  t.built()
281              t.visited()
282  
283      executed = executed_with_callbacks
284  
285      def failed(self):
286          """
287          Default action when a task fails:  stop the build.
288  
289          Note: Although this function is normally invoked on nodes in
290          the executing state, it might also be invoked on up-to-date
291          nodes when using Configure().
292          """
293          self.fail_stop()
294
295      def fail_stop(self):
296          """
297          Explicit stop-the-build failure.
298  
299          This sets failure status on the target nodes and all of
300          their dependent parent nodes.
301  
302          Note: Although this function is normally invoked on nodes in
303          the executing state, it might also be invoked on up-to-date
304          nodes when using Configure().
305          """
306          T = self.tm.trace
307          if T: T.write(self.trace_message('Task.failed_stop()', self.node))
308  
309          # Invoke will_not_build() to clean-up the pending children
310          # list.
311          self.tm.will_not_build(self.targets, lambda n: n.set_state(NODE_FAILED))
312  
313          # Tell the taskmaster to not start any new tasks
314          self.tm.stop()
315  
316          # We're stopping because of a build failure, but give the
317          # calling Task class a chance to postprocess() the top-level
318          # target under which the build failure occurred.
319          self.targets = [self.tm.current_top]
320          self.top = 1
321
322      def fail_continue(self):
323          """
324          Explicit continue-the-build failure.
325  
326          This sets failure status on the target nodes and all of
327          their dependent parent nodes.
328  
329          Note: Although this function is normally invoked on nodes in
330          the executing state, it might also be invoked on up-to-date
331          nodes when using Configure().
332          """
333          T = self.tm.trace
334          if T: T.write(self.trace_message('Task.failed_continue()', self.node))
335  
336          self.tm.will_not_build(self.targets, lambda n: n.set_state(NODE_FAILED))
337
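fail_stop() and fail_continue() are the two ready-made failure policies; failed() simply picks one. A wrapping interface that wants keep-going behavior can rebind the hook, for example (hypothetical subclass, shown only to illustrate the pattern):

    class KeepGoingTask(Task):
        # On failure, mark this target's dependent subtree as failed but
        # let the Taskmaster keep handing out unrelated work.
        failed = Task.fail_continue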
338      def make_ready_all(self):
339          """
340          Marks all targets in a task ready for execution.
341  
342          This is used when the interface needs every target Node to be
343          visited--the canonical example being the "scons -c" option.
344          """
345          T = self.tm.trace
346          if T: T.write(self.trace_message('Task.make_ready_all()', self.node))
347  
348          self.out_of_date = self.targets[:]
349          for t in self.targets:
350              t.disambiguate().set_state(NODE_EXECUTING)
351              for s in t.side_effects:
352                  s.set_state(NODE_EXECUTING)
353
354      def make_ready_current(self):
355          """
356          Marks all targets in a task ready for execution if any target
357          is not current.
358  
359          This is the default behavior for building only what's necessary.
360          """
361          T = self.tm.trace
362          if T: T.write(self.trace_message('Task.make_ready_current()',
363                                           self.node))
364  
365          self.out_of_date = []
366          needs_executing = False
367          for t in self.targets:
368              try:
369                  t.disambiguate().make_ready()
370                  is_up_to_date = not t.has_builder() or \
371                                  (not t.always_build and t.is_up_to_date())
372              except EnvironmentError, e:
373                  raise SCons.Errors.BuildError(node=t, errstr=e.strerror, filename=e.filename)
374  
375              if not is_up_to_date:
376                  self.out_of_date.append(t)
377                  needs_executing = True
378  
379          if needs_executing:
380              for t in self.targets:
381                  t.set_state(NODE_EXECUTING)
382                  for s in t.side_effects:
383                      s.set_state(NODE_EXECUTING)
384          else:
385              for t in self.targets:
386                  # We must invoke visited() to ensure that the node
387                  # information has been computed before allowing the
388                  # parent nodes to execute. (That could occur in a
389                  # parallel build...)
390                  t.visited()
391                  t.set_state(NODE_UP_TO_DATE)
392  
393      make_ready = make_ready_current
394  
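Because make_ready is just an alias for make_ready_current(), a subclass can opt into the visit-everything behavior by rebinding it, which is the kind of use the make_ready_all() docstring has in mind for "scons -c" (a hypothetical sketch, not the actual clean-mode implementation):

    class VisitAllTask(Task):
        # Mark every target for evaluation, even ones that are up to date,
        # so the wrapping interface gets a chance to act on each of them.
        make_ready = Task.make_ready_all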
395      def postprocess(self):
396          """
397          Post-processes a task after it's been executed.
398  
399          This examines all the targets just built (or not, we don't care
400          if the build was successful, or even if there was no build
401          because everything was up-to-date) to see if they have any
402          waiting parent Nodes, or Nodes waiting on a common side effect,
403          that can be put back on the candidates list.
404          """
405          T = self.tm.trace
406          if T: T.write(self.trace_message('Task.postprocess()', self.node))
407  
408          # We may have built multiple targets, some of which may have
409          # common parents waiting for this build.  Count up how many
410          # targets each parent was waiting for so we can subtract the
411          # values later, and so we *don't* put waiting side-effect Nodes
412          # back on the candidates list if the Node is also a waiting
413          # parent.
414  
415          targets = set(self.targets)
416  
417          pending_children = self.tm.pending_children
418          parents = {}
419          for t in targets:
420              # A node can only be in the pending_children set if it has
421              # some waiting_parents.
422              if t.waiting_parents:
423                  if T: T.write(self.trace_message('Task.postprocess()',
424                                                   t,
425                                                   'removing'))
426                  pending_children.discard(t)
427                  for p in t.waiting_parents:
428                      parents[p] = parents.get(p, 0) + 1
429  
430          for t in targets:
431              for s in t.side_effects:
432                  if s.get_state() == NODE_EXECUTING:
433                      s.set_state(NODE_NO_STATE)
434                  for p in s.waiting_parents:
435                      parents[p] = parents.get(p, 0) + 1
436                  for p in s.waiting_s_e:
437                      if p.ref_count == 0:
438                          self.tm.candidates.append(p)
439  
440          for p, subtract in parents.items():
441              p.ref_count = p.ref_count - subtract
442              if T: T.write(self.trace_message('Task.postprocess()',
443                                               p,
444                                               'adjusted parent ref count'))
445              if p.ref_count == 0:
446                  self.tm.candidates.append(p)
447  
448          for t in targets:
449              t.postprocess()
450  
451      # Exception handling subsystem.
452      #
453      # Exceptions that occur while walking the DAG or examining Nodes
454      # must be raised, but must be raised at an appropriate time and in
455      # a controlled manner so we can, if necessary, recover gracefully,
456      # possibly write out signature information for Nodes we've updated,
457      # etc.  This is done by having the Taskmaster tell us about the
458      # exception, and letting the Task raise it at the appropriate time.
459  
460      def exc_info(self):
461          """
462          Returns info about a recorded exception.
463          """
464          return self.exception
465
466      def exc_clear(self):
467          """
468          Clears any recorded exception.
469  
470          This also changes the "exception_raise" attribute to point
471          to the appropriate do-nothing method.
472          """
473          self.exception = (None, None, None)
474          self.exception_raise = self._no_exception_to_raise
475
476      def exception_set(self, exception=None):
477          """
478          Records an exception to be raised at the appropriate time.
479  
480          This also changes the "exception_raise" attribute to point
481          to the method that will, in fact, raise the recorded exception.
482          """
483          if not exception:
484              exception = sys.exc_info()
485          self.exception = exception
486          self.exception_raise = self._exception_raise
487
488      def _no_exception_to_raise(self):
489          pass
490
491      def _exception_raise(self):
492          """
493          Raises a pending exception that was recorded while getting a
494          Task ready for execution.
495          """
496          exc = self.exc_info()[:]
497          try:
498              exc_type, exc_value, exc_traceback = exc
499          except ValueError:
500              exc_type, exc_value = exc
501              exc_traceback = None
502          raise exc_type, exc_value, exc_traceback
503  
504  
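Together these methods form a small record-now, raise-later protocol: exception_set() captures sys.exc_info() and rebinds exception_raise, and the saved exception resurfaces on the next prepare() call. A rough sketch of the flow, using a stand-in name for whatever work failed:

    try:
        scan_children_somehow()     # hypothetical work that may blow up
    except:
        task.exception_set()        # records sys.exc_info() on the task

    # ... later, typically in the thread that will run the task ...
    task.prepare()                  # calls exception_raise(), re-raising the
                                    # original exception with its traceback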
505  def find_cycle(stack, visited):
506      if stack[-1] in visited:
507          return None
508      visited.add(stack[-1])
509      for n in stack[-1].waiting_parents:
510          stack.append(n)
511          if stack[0] == stack[-1]:
512              return stack
513          if find_cycle(stack, visited):
514              return stack
515          stack.pop()
516      return None
517  
518  
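find_cycle() walks waiting_parents links depth-first, looking for a path that leads back to the node it started from. A small illustration with stub objects (not real SCons Nodes) showing the shape of its input and output:

    class _StubNode:
        def __init__(self, name):
            self.name = name
            self.waiting_parents = set()
        def __str__(self):
            return self.name

    a, b = _StubNode('A'), _StubNode('B')
    a.waiting_parents.add(b)            # A is waited on by B
    b.waiting_parents.add(a)            # B is waited on by A -> a cycle

    cycle = find_cycle([a], set())      # -> [A, B, A]
    print " -> ".join(map(str, cycle))  # "A -> B -> A", as cleanup() formats it

    print find_cycle([b], set([a, b]))  # None: already-visited nodes are skipped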
519  class Taskmaster:
520      """
521      The Taskmaster for walking the dependency DAG.
522      """
523  
524      def __init__(self, targets=[], tasker=Task, order=None, trace=None):
525          self.original_top = targets
526          self.top_targets_left = targets[:]
527          self.top_targets_left.reverse()
528          self.candidates = []
529          self.tasker = tasker
530          if not order:
531              order = lambda l: l
532          self.order = order
533          self.message = None
534          self.trace = trace
535          self.next_candidate = self.find_next_candidate
536          self.pending_children = set()
537
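The order argument above can be any callable that takes the list of child Nodes about to become candidates and returns them in the order they should be pushed; the default is the identity function seen in __init__(). A hypothetical alternative that walks dependencies in a stable, name-sorted order:

    def alphabetical_order(dependencies):
        # Called each time children are about to be added to the
        # candidate stack.
        return sorted(dependencies, key=str)

    tm = Taskmaster(targets=my_targets, order=alphabetical_order)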
538      def find_next_candidate(self):
539          """
540          Returns the next candidate Node for (potential) evaluation.
541  
542          The candidate list (really a stack) initially consists of all of
543          the top-level (command line) targets provided when the Taskmaster
544          was initialized.  While we walk the DAG, visiting Nodes, all the
545          children that haven't finished processing get pushed on to the
546          candidate list.  Each child can then be popped and examined in
547          turn for whether *their* children are all up-to-date, in which
548          case a Task will be created for their actual evaluation and
549          potential building.
550  
551          Here is where we also allow candidate Nodes to alter the list of
552          Nodes that should be examined.  This is used, for example, when
553          invoking SCons in a source directory.  A source directory Node can
554          return its corresponding build directory Node, essentially saying,
555          "Hey, you really need to build this thing over here instead."
556          """
557          try:
558              return self.candidates.pop()
559          except IndexError:
560              pass
561          try:
562              node = self.top_targets_left.pop()
563          except IndexError:
564              return None
565          self.current_top = node
566          alt, message = node.alter_targets()
567          if alt:
568              self.message = message
569              self.candidates.append(node)
570              self.candidates.extend(self.order(alt))
571              node = self.candidates.pop()
572          return node
573
574      def no_next_candidate(self):
575          """
576          Stops Taskmaster processing by not returning a next candidate.
577  
578          Note that we have to clean up the Taskmaster candidate list
579          because the cycle detection depends on the fact that all nodes
580          have been processed somehow.
581          """
582          while self.candidates:
583              candidates = self.candidates
584              self.candidates = []
585              self.will_not_build(candidates)
586          return None
587  
588      def _validate_pending_children(self):
589          """
590          Validate the content of the pending_children set.  Assert if an
591          internal error is found.
592  
593          This function is used strictly for debugging the taskmaster by
594          checking that no invariants are violated.  It is not used in
595          normal operation.
596  
597          The pending_children set is used to detect cycles in the
598          dependency graph.  We call a "pending child" a child that is
599          found in the "pending" state when checking the dependencies of
600          its parent node.
601  
602          A pending child can occur when the Taskmaster completes a loop
603          through a cycle.  For example, let's imagine a graph made of
604          three nodes (A, B and C) that form a cycle.  The evaluation starts
605          at node A.  The taskmaster first considers whether node A's
606          child B is up-to-date.  Then, recursively, node B needs to
607          check whether node C is up-to-date.  This leaves us with a
608          dependency graph looking like:
609  
610                                         Next candidate \
611                                                          \
612          Node A (Pending) --> Node B(Pending) --> Node C (NoState)
613                  ^                                     |
614                  |                                     |
615                  +-------------------------------------+
616  
617          Now, when the Taskmaster examines Node C's child Node A,
618          it finds that Node A is in the "pending" state.  Therefore,
619          Node A is a pending child of node C.
620  
621          Pending children indicate that the Taskmaster has potentially
622          looped back through a cycle.  We say potentially because it could
623          also occur when a DAG is evaluated in parallel.  For example,
624          consider the following graph:
625  
626  
627          Node A (Pending) --> Node B(Pending) --> Node C (Pending) --> ...
628                  |                                     ^
629                  |                                     |
630                  +----------> Node D (NoState) --------+
631                                    /
632                  Next candidate   /
633  
634          The Taskmaster first evaluates the nodes A, B, and C and
635          starts building some children of node C.  Assuming that the
636          maximum parallel level has not been reached, the Taskmaster
637          will examine Node D.  It will find that Node C is a pending
638          child of Node D.
639  
640          In summary, evaluating a graph with a cycle will always
641          involve a pending child at one point.  A pending child might
642          indicate either a cycle or a diamond-shaped DAG.  Only a
643          fraction of the nodes ends up being a "pending child" of
644          another node.  This keeps the pending_children set small in
645          practice.
646  
647          We can differentiate between the two cases if we wait until
648          the end of the build.  At this point, all the pending children
649          nodes due to a diamond-shaped DAG will have been properly
650          built (or will have failed to build).  But the pending
651          children involved in a cycle will still be in the pending
652          state.
653  
654          The taskmaster removes nodes from the pending_children set as
655          soon as a pending_children node moves out of the pending
656          state.  This also helps to keep the pending_children set small.
657          """
658  
659          for n in self.pending_children:
660              assert n.state in (NODE_PENDING, NODE_EXECUTING), \
661                  (str(n), StateString[n.state])
662              assert len(n.waiting_parents) != 0, (str(n), len(n.waiting_parents))
663              for p in n.waiting_parents:
664                  assert p.ref_count > 0, (str(n), str(p), p.ref_count)
665  
666  
667      def trace_message(self, message):
668          return 'Taskmaster: %s\n' % message
669  
670      def trace_node(self, node):
671          return '<%-10s %-3s %s>' % (StateString[node.get_state()],
672                                      node.ref_count,
673                                      repr(str(node)))
674
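The trace attribute used throughout this class is just a writable, file-like object; when it is set, every step of the DAG walk is logged through trace_message() and trace_node() above. A sketch of enabling it directly when constructing the Taskmaster (my_targets is a placeholder):

    trace_file = open('taskmaster.log', 'w')   # or sys.stdout
    tm = Taskmaster(targets=my_targets, trace=trace_file)
    # Each log line is a 'Taskmaster: ...' message built by trace_message(),
    # with nodes rendered as '<state ref_count name>' by trace_node().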
675      def _find_next_ready_node(self):
676          """
677          Finds the next node that is ready to be built.
678  
679          This is *the* main guts of the DAG walk.  We loop through the
680          list of candidates, looking for something that has no un-built
681          children (i.e., that is a leaf Node or has dependencies that are
682          all leaf Nodes or up-to-date).  Candidate Nodes are re-scanned
683          (both the target Node itself and its sources, which are always
684          scanned in the context of a given target) to discover implicit
685          dependencies.  A Node that must wait for some children to be
686          built will be put back on the candidates list after the children
687          have finished building.  A Node that has been put back on the
688          candidates list in this way may have itself (or its sources)
689          re-scanned, in order to handle generated header files (e.g.) and
690          the implicit dependencies therein.
691  
692          Note that this method does not do any signature calculation or
693          up-to-date check itself.  All of that is handled by the Task
694          class.  This is purely concerned with the dependency graph walk.
695          """
696  
697          self.ready_exc = None
698  
699          T = self.trace
700          if T: T.write('\n' + self.trace_message('Looking for a node to evaluate'))
701  
702          while 1:
703              node = self.next_candidate()
704              if node is None:
705                  if T: T.write(self.trace_message('No candidate anymore.') + '\n')
706                  return None
707  
708              node = node.disambiguate()
709              state = node.get_state()
710  
711              # For debugging only:
712              #
713              # try:
714              #     self._validate_pending_children()
715              # except:
716              #     self.ready_exc = sys.exc_info()
717              #     return node
718  
719              if CollectStats:
720                  if not hasattr(node, 'stats'):
721                      node.stats = Stats()
722                      StatsNodes.append(node)
723                  S = node.stats
724                  S.considered = S.considered + 1
725              else:
726                  S = None
727  
728              if T: T.write(self.trace_message('    Considering node %s and its children:' % self.trace_node(node)))
729  
730              if state == NODE_NO_STATE:
731                  # Mark this node as being on the execution stack:
732                  node.set_state(NODE_PENDING)
733              elif state > NODE_PENDING:
734                  # Skip this node if it has already been evaluated:
735                  if S: S.already_handled = S.already_handled + 1
736                  if T: T.write(self.trace_message('       already handled (executed)'))
737                  continue
738  
739              try:
740                  children = node.children()
741              except SystemExit:
742                  exc_value = sys.exc_info()[1]
743                  e = SCons.Errors.ExplicitExit(node, exc_value.code)
744                  self.ready_exc = (SCons.Errors.ExplicitExit, e)
745                  if T: T.write(self.trace_message('       SystemExit'))
746                  return node
747              except Exception, e:
748                  # We had a problem just trying to figure out the
749                  # children (like a child couldn't be linked in to a
750                  # VariantDir, or a Scanner threw something).  Arrange to
751                  # raise the exception when the Task is "executed."
752                  self.ready_exc = sys.exc_info()
753                  if S: S.problem = S.problem + 1
754                  if T: T.write(self.trace_message('       exception %s while scanning children.\n' % e))
755                  return node
756  
757              children_not_visited = []
758              children_pending = set()
759              children_not_ready = []
760              children_failed = False
761  
762              for child in chain(children, node.prerequisites):
763                  childstate = child.get_state()
764  
765                  if T: T.write(self.trace_message('       ' + self.trace_node(child)))
766  
767                  if childstate == NODE_NO_STATE:
768                      children_not_visited.append(child)
769                  elif childstate == NODE_PENDING:
770                      children_pending.add(child)
771                  elif childstate == NODE_FAILED:
772                      children_failed = True
773  
774                  if childstate <= NODE_EXECUTING:
775                      children_not_ready.append(child)
776  
777  
778              # These nodes have not even been visited yet.  Add
779              # them to the list so that on some next pass we can
780              # take a stab at evaluating them (or their children).
781              children_not_visited.reverse()
782              self.candidates.extend(self.order(children_not_visited))
783              #if T and children_not_visited:
784              #    T.write(self.trace_message('     adding to candidates: %s' % map(str, children_not_visited)))
785              #    T.write(self.trace_message('     candidates now: %s\n' % map(str, self.candidates)))
786  
787              # Skip this node if any of its children have failed.
788              #
789              # This catches the case where we're descending a top-level
790              # target and one of our children failed while trying to be
791              # built by a *previous* descent of an earlier top-level
792              # target.
793              #
794              # It can also occur if a node is reused in multiple
795              # targets.  One first descends through one of the
796              # targets; the next time it occurs through the other target.
797              #
798              # Note that we can only have failed_children if the
799              # --keep-going flag was used, because without it the build
800              # will stop before diving into the other branch.
801              #
802              # Note that even if one of the children fails, we still
803              # added the other children to the list of candidate nodes
804              # to keep on building (--keep-going).
805              if children_failed:
806                  node.set_state(NODE_FAILED)
807  
808                  if S: S.child_failed = S.child_failed + 1
809                  if T: T.write(self.trace_message('****** %s\n' % self.trace_node(node)))
810                  continue
811  
812              if children_not_ready:
813                  for child in children_not_ready:
814                      # We're waiting on one or more derived targets
815                      # that have not yet finished building.
816                      if S: S.not_built = S.not_built + 1
817  
818                      # Add this node to the waiting parents lists of
819                      # anything we're waiting on, with a reference
820                      # count so we can be put back on the list for
821                      # re-evaluation when they've all finished.
822                      node.ref_count = node.ref_count + child.add_to_waiting_parents(node)
823                      if T: T.write(self.trace_message('     adjusted ref count: %s, child %s' %
824                                    (self.trace_node(node), repr(str(child)))))
825  
826                  if T:
827                      for pc in children_pending:
828                          T.write(self.trace_message('       adding %s to the pending children set\n' %
829                                  self.trace_node(pc)))
830                  self.pending_children = self.pending_children | children_pending
831  
832                  continue
833  
834              # Skip this node if it has side-effects that are
835              # currently being built:
836              wait_side_effects = False
837              for se in node.side_effects:
838                  if se.get_state() == NODE_EXECUTING:
839                      se.add_to_waiting_s_e(node)
840                      wait_side_effects = True
841  
842              if wait_side_effects:
843                  if S: S.side_effects = S.side_effects + 1
844                  continue
845  
846              # The default when we've gotten through all of the checks above:
847              # this node is ready to be built.
848              if S: S.build = S.build + 1
849              if T: T.write(self.trace_message('Evaluating %s\n' %
850                                               self.trace_node(node)))
851  
852              # For debugging only:
853              #
854              # try:
855              #     self._validate_pending_children()
856              # except:
857              #     self.ready_exc = sys.exc_info()
858              #     return node
859  
860              return node
861  
862          return None
863
864      def next_task(self):
865          """
866          Returns the next task to be executed.
867  
868          This simply asks for the next Node to be evaluated, and then wraps
869          it in the specific Task subclass with which we were initialized.
870          """
871          node = self._find_next_ready_node()
872  
873          if node is None:
874              return None
875  
876          tlist = node.get_executor().targets
877  
878          task = self.tasker(self, tlist, node in self.original_top, node)
879          try:
880              task.make_ready()
881          except:
882              # We had a problem just trying to get this task ready (like
883              # a child couldn't be linked in to a VariantDir when deciding
884              # whether this node is current).  Arrange to raise the
885              # exception when the Task is "executed."
886              self.ready_exc = sys.exc_info()
887  
888          if self.ready_exc:
889              task.exception_set(self.ready_exc)
890  
891          self.ready_exc = None
892  
893          return task
894
895      def will_not_build(self, nodes, node_func=lambda n: None):
896          """
897          Perform clean-up on nodes that will never be built.  Invokes
898          a user defined function on all of these nodes (including all
899          of their parents).
900          """
901  
902          T = self.trace
903  
904          pending_children = self.pending_children
905  
906          to_visit = set(nodes)
907          pending_children = pending_children - to_visit
908  
909          if T:
910              for n in nodes:
911                  T.write(self.trace_message('       removing node %s from the pending children set\n' %
912                          self.trace_node(n)))
913          try:
914              while 1:
915                  try:
916                      node = to_visit.pop()
917                  except AttributeError:
918                      # Python 1.5.2
919                      if len(to_visit):
920                          node = to_visit[0]
921                          to_visit.remove(node)
922                      else:
923                          break
924  
925                  node_func(node)
926  
927                  # Prune recursion by flushing the waiting children
928                  # list immediately.
929                  parents = node.waiting_parents
930                  node.waiting_parents = set()
931  
932                  to_visit = to_visit | parents
933                  pending_children = pending_children - parents
934  
935                  for p in parents:
936                      p.ref_count = p.ref_count - 1
937                      if T: T.write(self.trace_message('       removing parent %s from the pending children set\n' %
938                                    self.trace_node(p)))
939          except KeyError:
940              # The container to_visit has been emptied.
941              pass
942  
943          # We have to stick the pending_children set back into the
944          # taskmaster because Python 1.5.2 compatibility does not
945          # allow us to use in-place updates.
946          self.pending_children = pending_children
947
948      def stop(self):
949          """
950          Stops the current build completely.
951          """
952          self.next_candidate = self.no_next_candidate
953
954      def cleanup(self):
955          """
956          Check for dependency cycles.
957          """
958          if not self.pending_children:
959              return
960  
961          # TODO(1.5)
962          #nclist = [ (n, find_cycle([n], set())) for n in self.pending_children ]
963          nclist = map(lambda n: (n, find_cycle([n], set())), self.pending_children)
964  
965          # TODO(1.5)
966          #genuine_cycles = [
967          #    node for node, cycle in nclist
968          #    if cycle or node.get_state() != NODE_EXECUTED
969          #]
970          genuine_cycles = filter(lambda t: t[1] or t[0].get_state() != NODE_EXECUTED, nclist)
971          if not genuine_cycles:
972              # All of the "cycles" found were single nodes in EXECUTED state,
973              # which is to say, they really weren't cycles.  Just return.
974              return
975  
976          desc = 'Found dependency cycle(s):\n'
977          for node, cycle in nclist:
978              if cycle:
979                  desc = desc + "  " + string.join(map(str, cycle), " -> ") + "\n"
980              else:
981                  desc = desc + \
982                      "  Internal Error: no cycle found for node %s (%s) in state %s\n" % \
983                      (node, repr(node), StateString[node.get_state()])
984  
985          raise SCons.Errors.UserError, desc
986