
Source Code for Module SCons.Taskmaster

   1  # 
   2  # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 The SCons Foundation 
   3  # 
   4  # Permission is hereby granted, free of charge, to any person obtaining 
   5  # a copy of this software and associated documentation files (the 
   6  # "Software"), to deal in the Software without restriction, including 
   7  # without limitation the rights to use, copy, modify, merge, publish, 
   8  # distribute, sublicense, and/or sell copies of the Software, and to 
   9  # permit persons to whom the Software is furnished to do so, subject to 
  10  # the following conditions: 
  11  # 
  12  # The above copyright notice and this permission notice shall be included 
  13  # in all copies or substantial portions of the Software. 
  14  # 
  15  # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY 
  16  # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE 
  17  # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 
  18  # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE 
  19  # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 
  20  # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION 
  21  # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
  22   
  23  __doc__ = """ 
  24  Generic Taskmaster module for the SCons build engine. 
  25   
  26  This module contains the primary interface(s) between a wrapping user 
  27  interface and the SCons build engine.  There are two key classes here: 
  28   
  29      Taskmaster 
  30          This is the main engine for walking the dependency graph and 
  31          calling things to decide what does or doesn't need to be built. 
  32   
  33      Task 
  34          This is the base class for allowing a wrapping interface to 
  35          decide what does or doesn't actually need to be done.  The 
  36          intention is for a wrapping interface to subclass this as 
  37          appropriate for different types of behavior it may need. 
  38   
  39          The canonical example is the SCons native Python interface, 
  40          which has Task subclasses that handle its specific behavior, 
  41          like printing "`foo' is up to date" when a top-level target 
  42          doesn't need to be built, and handling the -c option by removing 
  43          targets as its "build" action.  There is also a separate subclass 
  44          for suppressing this output when the -q option is used. 
  45   
  46          The Taskmaster instantiates a Task object for each (set of) 
  47          target(s) that it decides need to be evaluated and/or built. 
  48  """ 
  49   
  50  __revision__ = "src/engine/SCons/Taskmaster.py 5357 2011/09/09 21:31:03 bdeegan" 
  51   
  52  from itertools import chain 
  53  import operator 
  54  import sys 
  55  import traceback 
  56   
  57  import SCons.Errors 
  58  import SCons.Node 
  59  import SCons.Warnings 
  60   
  61  StateString = SCons.Node.StateString 
  62  NODE_NO_STATE = SCons.Node.no_state 
  63  NODE_PENDING = SCons.Node.pending 
  64  NODE_EXECUTING = SCons.Node.executing 
  65  NODE_UP_TO_DATE = SCons.Node.up_to_date 
  66  NODE_EXECUTED = SCons.Node.executed 
  67  NODE_FAILED = SCons.Node.failed 
  68   
  69  print_prepare = 0               # set by option --debug=prepare 
  70   
  71  # A subsystem for recording stats about how different Nodes are handled by 
  72  # the main Taskmaster loop.  There's no external control here (no need for 
  73  # a --debug= option); enable it by changing the value of CollectStats. 
  74   
  75  CollectStats = None 
  76   
77 -class Stats(object):
78 """ 79 A simple class for holding statistics about the disposition of a 80 Node by the Taskmaster. If we're collecting statistics, each Node 81 processed by the Taskmaster gets one of these attached, in which case 82 the Taskmaster records its decision each time it processes the Node. 83 (Ideally, that's just once per Node.) 84 """
85 - def __init__(self):
86 """ 87 Instantiates a Taskmaster.Stats object, initializing all 88 appropriate counters to zero. 89 """ 90 self.considered = 0 91 self.already_handled = 0 92 self.problem = 0 93 self.child_failed = 0 94 self.not_built = 0 95 self.side_effects = 0 96 self.build = 0
97 98 StatsNodes = [] 99 100 fmt = "%(considered)3d "\ 101 "%(already_handled)3d " \ 102 "%(problem)3d " \ 103 "%(child_failed)3d " \ 104 "%(not_built)3d " \ 105 "%(side_effects)3d " \ 106 "%(build)3d " 107
108 -def dump_stats():
 109      for n in sorted(StatsNodes, key=lambda a: str(a)):
 110          print (fmt % n.stats.__dict__) + str(n)
111 112 113
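The comment above notes that this statistics subsystem has no --debug= option; it is enabled by assigning a true value to CollectStats before the build runs. A minimal sketch of how a wrapping script might do that and dump the results afterwards (run_with_stats() and its run_build argument are hypothetical stand-ins for whatever actually drives the build; they are not part of this module):

# Illustrative sketch only: turn on per-Node statistics collection and
# print one line per Node when the build is done.
import SCons.Taskmaster

def run_with_stats(run_build):
    SCons.Taskmaster.CollectStats = 1   # any true value enables collection
    try:
        run_build()                     # hypothetical build driver
    finally:
        SCons.Taskmaster.dump_stats()   # uses the 'fmt' layout defined above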
114 -class Task(object):
115 """ 116 Default SCons build engine task. 117 118 This controls the interaction of the actual building of node 119 and the rest of the engine. 120 121 This is expected to handle all of the normally-customizable 122 aspects of controlling a build, so any given application 123 *should* be able to do what it wants by sub-classing this 124 class and overriding methods as appropriate. If an application 125 needs to customze something by sub-classing Taskmaster (or 126 some other build engine class), we should first try to migrate 127 that functionality into this class. 128 129 Note that it's generally a good idea for sub-classes to call 130 these methods explicitly to update state, etc., rather than 131 roll their own interaction with Taskmaster from scratch. 132 """
133 - def __init__(self, tm, targets, top, node):
 134          self.tm = tm
 135          self.targets = targets
 136          self.top = top
 137          self.node = node
 138          self.exc_clear()
139
140 - def trace_message(self, method, node, description='node'):
 141          fmt = '%-20s %s %s\n'
 142          return fmt % (method + ':', description, self.tm.trace_node(node))
143
144 - def display(self, message):
145 """ 146 Hook to allow the calling interface to display a message. 147 148 This hook gets called as part of preparing a task for execution 149 (that is, a Node to be built). As part of figuring out what Node 150 should be built next, the actually target list may be altered, 151 along with a message describing the alteration. The calling 152 interface can subclass Task and provide a concrete implementation 153 of this method to see those messages. 154 """ 155 pass
156
157 - def prepare(self):
158 """ 159 Called just before the task is executed. 160 161 This is mainly intended to give the target Nodes a chance to 162 unlink underlying files and make all necessary directories before 163 the Action is actually called to build the targets. 164 """ 165 global print_prepare 166 T = self.tm.trace 167 if T: T.write(self.trace_message(u'Task.prepare()', self.node)) 168 169 # Now that it's the appropriate time, give the TaskMaster a 170 # chance to raise any exceptions it encountered while preparing 171 # this task. 172 self.exception_raise() 173 174 if self.tm.message: 175 self.display(self.tm.message) 176 self.tm.message = None 177 178 # Let the targets take care of any necessary preparations. 179 # This includes verifying that all of the necessary sources 180 # and dependencies exist, removing the target file(s), etc. 181 # 182 # As of April 2008, the get_executor().prepare() method makes 183 # sure that all of the aggregate sources necessary to build this 184 # Task's target(s) exist in one up-front check. The individual 185 # target t.prepare() methods check that each target's explicit 186 # or implicit dependencies exists, and also initialize the 187 # .sconsign info. 188 executor = self.targets[0].get_executor() 189 executor.prepare() 190 for t in executor.get_action_targets(): 191 if print_prepare: 192 print "Preparing target %s..."%t 193 for s in t.side_effects: 194 print "...with side-effect %s..."%s 195 t.prepare() 196 for s in t.side_effects: 197 if print_prepare: 198 print "...Preparing side-effect %s..."%s 199 s.prepare()
200
201 - def get_target(self):
202 """Fetch the target being built or updated by this task. 203 """ 204 return self.node
205
206 - def needs_execute(self):
 207          # TODO(deprecate): "return True" is the old default behavior;
 208          # change it to NotImplementedError (after running through the
 209          # Deprecation Cycle) so the desired behavior is explicitly
 210          # determined by which concrete subclass is used.
 211          #raise NotImplementedError
 212          msg = ('Taskmaster.Task is an abstract base class; instead of\n'
 213                 '\tusing it directly, '
 214                 'derive from it and override the abstract methods.')
 215          SCons.Warnings.warn(SCons.Warnings.TaskmasterNeedsExecuteWarning, msg)
 216          return True
217
218 - def execute(self):
219 """ 220 Called to execute the task. 221 222 This method is called from multiple threads in a parallel build, 223 so only do thread safe stuff here. Do thread unsafe stuff in 224 prepare(), executed() or failed(). 225 """ 226 T = self.tm.trace 227 if T: T.write(self.trace_message(u'Task.execute()', self.node)) 228 229 try: 230 everything_was_cached = 1 231 for t in self.targets: 232 if t.retrieve_from_cache(): 233 # Call the .built() method without calling the 234 # .push_to_cache() method, since we just got the 235 # target from the cache and don't need to push 236 # it back there. 237 t.set_state(NODE_EXECUTED) 238 t.built() 239 else: 240 everything_was_cached = 0 241 break 242 if not everything_was_cached: 243 self.targets[0].build() 244 except SystemExit: 245 exc_value = sys.exc_info()[1] 246 raise SCons.Errors.ExplicitExit(self.targets[0], exc_value.code) 247 except SCons.Errors.UserError: 248 raise 249 except SCons.Errors.BuildError: 250 raise 251 except Exception, e: 252 buildError = SCons.Errors.convert_to_BuildError(e) 253 buildError.node = self.targets[0] 254 buildError.exc_info = sys.exc_info() 255 raise buildError
256
257 - def executed_without_callbacks(self):
258 """ 259 Called when the task has been successfully executed 260 and the Taskmaster instance doesn't want to call 261 the Node's callback methods. 262 """ 263 T = self.tm.trace 264 if T: T.write(self.trace_message('Task.executed_without_callbacks()', 265 self.node)) 266 267 for t in self.targets: 268 if t.get_state() == NODE_EXECUTING: 269 for side_effect in t.side_effects: 270 side_effect.set_state(NODE_NO_STATE) 271 t.set_state(NODE_EXECUTED)
272
273 - def executed_with_callbacks(self):
274 """ 275 Called when the task has been successfully executed and 276 the Taskmaster instance wants to call the Node's callback 277 methods. 278 279 This may have been a do-nothing operation (to preserve build 280 order), so we must check the node's state before deciding whether 281 it was "built", in which case we call the appropriate Node method. 282 In any event, we always call "visited()", which will handle any 283 post-visit actions that must take place regardless of whether 284 or not the target was an actual built target or a source Node. 285 """ 286 T = self.tm.trace 287 if T: T.write(self.trace_message('Task.executed_with_callbacks()', 288 self.node)) 289 290 for t in self.targets: 291 if t.get_state() == NODE_EXECUTING: 292 for side_effect in t.side_effects: 293 side_effect.set_state(NODE_NO_STATE) 294 t.set_state(NODE_EXECUTED) 295 t.push_to_cache() 296 t.built() 297 t.visited()
298 299 executed = executed_with_callbacks 300
301 - def failed(self):
302 """ 303 Default action when a task fails: stop the build. 304 305 Note: Although this function is normally invoked on nodes in 306 the executing state, it might also be invoked on up-to-date 307 nodes when using Configure(). 308 """ 309 self.fail_stop()
310
311 - def fail_stop(self):
312 """ 313 Explicit stop-the-build failure. 314 315 This sets failure status on the target nodes and all of 316 their dependent parent nodes. 317 318 Note: Although this function is normally invoked on nodes in 319 the executing state, it might also be invoked on up-to-date 320 nodes when using Configure(). 321 """ 322 T = self.tm.trace 323 if T: T.write(self.trace_message('Task.failed_stop()', self.node)) 324 325 # Invoke will_not_build() to clean-up the pending children 326 # list. 327 self.tm.will_not_build(self.targets, lambda n: n.set_state(NODE_FAILED)) 328 329 # Tell the taskmaster to not start any new tasks 330 self.tm.stop() 331 332 # We're stopping because of a build failure, but give the 333 # calling Task class a chance to postprocess() the top-level 334 # target under which the build failure occurred. 335 self.targets = [self.tm.current_top] 336 self.top = 1
337
338 - def fail_continue(self):
339 """ 340 Explicit continue-the-build failure. 341 342 This sets failure status on the target nodes and all of 343 their dependent parent nodes. 344 345 Note: Although this function is normally invoked on nodes in 346 the executing state, it might also be invoked on up-to-date 347 nodes when using Configure(). 348 """ 349 T = self.tm.trace 350 if T: T.write(self.trace_message('Task.failed_continue()', self.node)) 351 352 self.tm.will_not_build(self.targets, lambda n: n.set_state(NODE_FAILED))
353
354 - def make_ready_all(self):
355 """ 356 Marks all targets in a task ready for execution. 357 358 This is used when the interface needs every target Node to be 359 visited--the canonical example being the "scons -c" option. 360 """ 361 T = self.tm.trace 362 if T: T.write(self.trace_message('Task.make_ready_all()', self.node)) 363 364 self.out_of_date = self.targets[:] 365 for t in self.targets: 366 t.disambiguate().set_state(NODE_EXECUTING) 367 for s in t.side_effects: 368 # add disambiguate here to mirror the call on targets above 369 s.disambiguate().set_state(NODE_EXECUTING)
370
371 - def make_ready_current(self):
372 """ 373 Marks all targets in a task ready for execution if any target 374 is not current. 375 376 This is the default behavior for building only what's necessary. 377 """ 378 T = self.tm.trace 379 if T: T.write(self.trace_message(u'Task.make_ready_current()', 380 self.node)) 381 382 self.out_of_date = [] 383 needs_executing = False 384 for t in self.targets: 385 try: 386 t.disambiguate().make_ready() 387 is_up_to_date = not t.has_builder() or \ 388 (not t.always_build and t.is_up_to_date()) 389 except EnvironmentError, e: 390 raise SCons.Errors.BuildError(node=t, errstr=e.strerror, filename=e.filename) 391 392 if not is_up_to_date: 393 self.out_of_date.append(t) 394 needs_executing = True 395 396 if needs_executing: 397 for t in self.targets: 398 t.set_state(NODE_EXECUTING) 399 for s in t.side_effects: 400 # add disambiguate here to mirror the call on targets in first loop above 401 s.disambiguate().set_state(NODE_EXECUTING) 402 else: 403 for t in self.targets: 404 # We must invoke visited() to ensure that the node 405 # information has been computed before allowing the 406 # parent nodes to execute. (That could occur in a 407 # parallel build...) 408 t.visited() 409 t.set_state(NODE_UP_TO_DATE)
410 411 make_ready = make_ready_current 412
413 - def postprocess(self):
414 """ 415 Post-processes a task after it's been executed. 416 417 This examines all the targets just built (or not, we don't care 418 if the build was successful, or even if there was no build 419 because everything was up-to-date) to see if they have any 420 waiting parent Nodes, or Nodes waiting on a common side effect, 421 that can be put back on the candidates list. 422 """ 423 T = self.tm.trace 424 if T: T.write(self.trace_message(u'Task.postprocess()', self.node)) 425 426 # We may have built multiple targets, some of which may have 427 # common parents waiting for this build. Count up how many 428 # targets each parent was waiting for so we can subtract the 429 # values later, and so we *don't* put waiting side-effect Nodes 430 # back on the candidates list if the Node is also a waiting 431 # parent. 432 433 targets = set(self.targets) 434 435 pending_children = self.tm.pending_children 436 parents = {} 437 for t in targets: 438 # A node can only be in the pending_children set if it has 439 # some waiting_parents. 440 if t.waiting_parents: 441 if T: T.write(self.trace_message(u'Task.postprocess()', 442 t, 443 'removing')) 444 pending_children.discard(t) 445 for p in t.waiting_parents: 446 parents[p] = parents.get(p, 0) + 1 447 448 for t in targets: 449 for s in t.side_effects: 450 if s.get_state() == NODE_EXECUTING: 451 s.set_state(NODE_NO_STATE) 452 for p in s.waiting_parents: 453 parents[p] = parents.get(p, 0) + 1 454 for p in s.waiting_s_e: 455 if p.ref_count == 0: 456 self.tm.candidates.append(p) 457 458 for p, subtract in parents.items(): 459 p.ref_count = p.ref_count - subtract 460 if T: T.write(self.trace_message(u'Task.postprocess()', 461 p, 462 'adjusted parent ref count')) 463 if p.ref_count == 0: 464 self.tm.candidates.append(p) 465 466 for t in targets: 467 t.postprocess()
 468  
 469      # Exception handling subsystem.
 470      #
 471      # Exceptions that occur while walking the DAG or examining Nodes
 472      # must be raised, but must be raised at an appropriate time and in
 473      # a controlled manner so we can, if necessary, recover gracefully,
 474      # possibly write out signature information for Nodes we've updated,
 475      # etc.  This is done by having the Taskmaster tell us about the
 476      # exception, and letting the Task re-raise it later via exception_raise().
 477  
478 - def exc_info(self):
479 """ 480 Returns info about a recorded exception. 481 """ 482 return self.exception
483
484 - def exc_clear(self):
485 """ 486 Clears any recorded exception. 487 488 This also changes the "exception_raise" attribute to point 489 to the appropriate do-nothing method. 490 """ 491 self.exception = (None, None, None) 492 self.exception_raise = self._no_exception_to_raise
493
494 - def exception_set(self, exception=None):
495 """ 496 Records an exception to be raised at the appropriate time. 497 498 This also changes the "exception_raise" attribute to point 499 to the method that will, in fact 500 """ 501 if not exception: 502 exception = sys.exc_info() 503 self.exception = exception 504 self.exception_raise = self._exception_raise
505
506 - def _no_exception_to_raise(self):
 507          pass
508
509 - def _exception_raise(self):
510 """ 511 Raises a pending exception that was recorded while getting a 512 Task ready for execution. 513 """ 514 exc = self.exc_info()[:] 515 try: 516 exc_type, exc_value, exc_traceback = exc 517 except ValueError: 518 exc_type, exc_value = exc 519 exc_traceback = None 520 raise exc_type, exc_value, exc_traceback
521
522 -class AlwaysTask(Task):
523 - def needs_execute(self):
524 """ 525 Always returns True (indicating this Task should always 526 be executed). 527 528 Subclasses that need this behavior (as opposed to the default 529 of only executing Nodes that are out of date w.r.t. their 530 dependencies) can use this as follows: 531 532 class MyTaskSubclass(SCons.Taskmaster.Task): 533 needs_execute = SCons.Taskmaster.Task.execute_always 534 """ 535 return True
536
537 -class OutOfDateTask(Task):
538 - def needs_execute(self):
539 """ 540 Returns True (indicating this Task should be executed) if this 541 Task's target state indicates it needs executing, which has 542 already been determined by an earlier up-to-date check. 543 """ 544 return self.targets[0].get_state() == SCons.Node.executing
545 546
547 -def find_cycle(stack, visited):
 548      if stack[-1] in visited:
 549          return None
 550      visited.add(stack[-1])
 551      for n in stack[-1].waiting_parents:
 552          stack.append(n)
 553          if stack[0] == stack[-1]:
 554              return stack
 555          if find_cycle(stack, visited):
 556              return stack
 557          stack.pop()
 558      return None
559 560
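find_cycle() recursively follows waiting_parents links and returns the visited stack as soon as it arrives back at the node it started from, or None otherwise. A small illustrative sketch, using hand-rolled stand-in objects rather than real SCons.Node instances (only the waiting_parents attribute is mimicked), assuming SCons is importable:

# Toy demonstration of find_cycle(): three stand-in "nodes" whose
# waiting_parents links form the cycle A -> B -> C -> A.
import SCons.Taskmaster

class _FakeNode(object):
    def __init__(self, name):
        self.name = name
        self.waiting_parents = set()
    def __str__(self):
        return self.name

a, b, c = _FakeNode('A'), _FakeNode('B'), _FakeNode('C')
a.waiting_parents.add(b)
b.waiting_parents.add(c)
c.waiting_parents.add(a)

cycle = SCons.Taskmaster.find_cycle([a], set())
if cycle:
    print(" -> ".join(map(str, cycle)))   # prints: A -> B -> C -> A

This " -> " formatting mirrors how Taskmaster.cleanup() (below) reports genuine cycles when it raises SCons.Errors.UserError.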
561 -class Taskmaster(object):
562 """ 563 The Taskmaster for walking the dependency DAG. 564 """ 565
566 - def __init__(self, targets=[], tasker=None, order=None, trace=None):
 567          self.original_top = targets
 568          self.top_targets_left = targets[:]
 569          self.top_targets_left.reverse()
 570          self.candidates = []
 571          if tasker is None:
 572              tasker = OutOfDateTask
 573          self.tasker = tasker
 574          if not order:
 575              order = lambda l: l
 576          self.order = order
 577          self.message = None
 578          self.trace = trace
 579          self.next_candidate = self.find_next_candidate
 580          self.pending_children = set()
581
582 - def find_next_candidate(self):
583 """ 584 Returns the next candidate Node for (potential) evaluation. 585 586 The candidate list (really a stack) initially consists of all of 587 the top-level (command line) targets provided when the Taskmaster 588 was initialized. While we walk the DAG, visiting Nodes, all the 589 children that haven't finished processing get pushed on to the 590 candidate list. Each child can then be popped and examined in 591 turn for whether *their* children are all up-to-date, in which 592 case a Task will be created for their actual evaluation and 593 potential building. 594 595 Here is where we also allow candidate Nodes to alter the list of 596 Nodes that should be examined. This is used, for example, when 597 invoking SCons in a source directory. A source directory Node can 598 return its corresponding build directory Node, essentially saying, 599 "Hey, you really need to build this thing over here instead." 600 """ 601 try: 602 return self.candidates.pop() 603 except IndexError: 604 pass 605 try: 606 node = self.top_targets_left.pop() 607 except IndexError: 608 return None 609 self.current_top = node 610 alt, message = node.alter_targets() 611 if alt: 612 self.message = message 613 self.candidates.append(node) 614 self.candidates.extend(self.order(alt)) 615 node = self.candidates.pop() 616 return node
617
618 - def no_next_candidate(self):
619 """ 620 Stops Taskmaster processing by not returning a next candidate. 621 622 Note that we have to clean-up the Taskmaster candidate list 623 because the cycle detection depends on the fact all nodes have 624 been processed somehow. 625 """ 626 while self.candidates: 627 candidates = self.candidates 628 self.candidates = [] 629 self.will_not_build(candidates) 630 return None
631
632 - def _validate_pending_children(self):
633 """ 634 Validate the content of the pending_children set. Assert if an 635 internal error is found. 636 637 This function is used strictly for debugging the taskmaster by 638 checking that no invariants are violated. It is not used in 639 normal operation. 640 641 The pending_children set is used to detect cycles in the 642 dependency graph. We call a "pending child" a child that is 643 found in the "pending" state when checking the dependencies of 644 its parent node. 645 646 A pending child can occur when the Taskmaster completes a loop 647 through a cycle. For example, lets imagine a graph made of 648 three node (A, B and C) making a cycle. The evaluation starts 649 at node A. The taskmaster first consider whether node A's 650 child B is up-to-date. Then, recursively, node B needs to 651 check whether node C is up-to-date. This leaves us with a 652 dependency graph looking like: 653 654 Next candidate \ 655 \ 656 Node A (Pending) --> Node B(Pending) --> Node C (NoState) 657 ^ | 658 | | 659 +-------------------------------------+ 660 661 Now, when the Taskmaster examines the Node C's child Node A, 662 it finds that Node A is in the "pending" state. Therefore, 663 Node A is a pending child of node C. 664 665 Pending children indicate that the Taskmaster has potentially 666 loop back through a cycle. We say potentially because it could 667 also occur when a DAG is evaluated in parallel. For example, 668 consider the following graph: 669 670 671 Node A (Pending) --> Node B(Pending) --> Node C (Pending) --> ... 672 | ^ 673 | | 674 +----------> Node D (NoState) --------+ 675 / 676 Next candidate / 677 678 The Taskmaster first evaluates the nodes A, B, and C and 679 starts building some children of node C. Assuming, that the 680 maximum parallel level has not been reached, the Taskmaster 681 will examine Node D. It will find that Node C is a pending 682 child of Node D. 683 684 In summary, evaluating a graph with a cycle will always 685 involve a pending child at one point. A pending child might 686 indicate either a cycle or a diamond-shaped DAG. Only a 687 fraction of the nodes ends-up being a "pending child" of 688 another node. This keeps the pending_children set small in 689 practice. 690 691 We can differentiate between the two cases if we wait until 692 the end of the build. At this point, all the pending children 693 nodes due to a diamond-shaped DAG will have been properly 694 built (or will have failed to build). But, the pending 695 children involved in a cycle will still be in the pending 696 state. 697 698 The taskmaster removes nodes from the pending_children set as 699 soon as a pending_children node moves out of the pending 700 state. This also helps to keep the pending_children set small. 701 """ 702 703 for n in self.pending_children: 704 assert n.state in (NODE_PENDING, NODE_EXECUTING), \ 705 (str(n), StateString[n.state]) 706 assert len(n.waiting_parents) != 0, (str(n), len(n.waiting_parents)) 707 for p in n.waiting_parents: 708 assert p.ref_count > 0, (str(n), str(p), p.ref_count)
709 710
711 - def trace_message(self, message):
 712          return 'Taskmaster: %s\n' % message
713
714 - def trace_node(self, node):
 715          return '<%-10s %-3s %s>' % (StateString[node.get_state()],
 716                                      node.ref_count,
 717                                      repr(str(node)))
718
719 - def _find_next_ready_node(self):
720 """ 721 Finds the next node that is ready to be built. 722 723 This is *the* main guts of the DAG walk. We loop through the 724 list of candidates, looking for something that has no un-built 725 children (i.e., that is a leaf Node or has dependencies that are 726 all leaf Nodes or up-to-date). Candidate Nodes are re-scanned 727 (both the target Node itself and its sources, which are always 728 scanned in the context of a given target) to discover implicit 729 dependencies. A Node that must wait for some children to be 730 built will be put back on the candidates list after the children 731 have finished building. A Node that has been put back on the 732 candidates list in this way may have itself (or its sources) 733 re-scanned, in order to handle generated header files (e.g.) and 734 the implicit dependencies therein. 735 736 Note that this method does not do any signature calculation or 737 up-to-date check itself. All of that is handled by the Task 738 class. This is purely concerned with the dependency graph walk. 739 """ 740 741 self.ready_exc = None 742 743 T = self.trace 744 if T: T.write(u'\n' + self.trace_message('Looking for a node to evaluate')) 745 746 while True: 747 node = self.next_candidate() 748 if node is None: 749 if T: T.write(self.trace_message('No candidate anymore.') + u'\n') 750 return None 751 752 node = node.disambiguate() 753 state = node.get_state() 754 755 # For debugging only: 756 # 757 # try: 758 # self._validate_pending_children() 759 # except: 760 # self.ready_exc = sys.exc_info() 761 # return node 762 763 if CollectStats: 764 if not hasattr(node, 'stats'): 765 node.stats = Stats() 766 StatsNodes.append(node) 767 S = node.stats 768 S.considered = S.considered + 1 769 else: 770 S = None 771 772 if T: T.write(self.trace_message(u' Considering node %s and its children:' % self.trace_node(node))) 773 774 if state == NODE_NO_STATE: 775 # Mark this node as being on the execution stack: 776 node.set_state(NODE_PENDING) 777 elif state > NODE_PENDING: 778 # Skip this node if it has already been evaluated: 779 if S: S.already_handled = S.already_handled + 1 780 if T: T.write(self.trace_message(u' already handled (executed)')) 781 continue 782 783 executor = node.get_executor() 784 785 try: 786 children = executor.get_all_children() 787 except SystemExit: 788 exc_value = sys.exc_info()[1] 789 e = SCons.Errors.ExplicitExit(node, exc_value.code) 790 self.ready_exc = (SCons.Errors.ExplicitExit, e) 791 if T: T.write(self.trace_message(' SystemExit')) 792 return node 793 except Exception, e: 794 # We had a problem just trying to figure out the 795 # children (like a child couldn't be linked in to a 796 # VariantDir, or a Scanner threw something). Arrange to 797 # raise the exception when the Task is "executed." 
 798                  self.ready_exc = sys.exc_info()
 799                  if S: S.problem = S.problem + 1
 800                  if T: T.write(self.trace_message('    exception %s while scanning children.\n' % e))
 801                  return node
 802  
 803              children_not_visited = []
 804              children_pending = set()
 805              children_not_ready = []
 806              children_failed = False
 807  
 808              for child in chain(executor.get_all_prerequisites(), children):
 809                  childstate = child.get_state()
 810  
 811                  if T: T.write(self.trace_message(u'       ' + self.trace_node(child)))
 812  
 813                  if childstate == NODE_NO_STATE:
 814                      children_not_visited.append(child)
 815                  elif childstate == NODE_PENDING:
 816                      children_pending.add(child)
 817                  elif childstate == NODE_FAILED:
 818                      children_failed = True
 819  
 820                  if childstate <= NODE_EXECUTING:
 821                      children_not_ready.append(child)
 822  
 823  
 824              # These nodes have not even been visited yet.  Add
 825              # them to the list so that on some next pass we can
 826              # take a stab at evaluating them (or their children).
 827              children_not_visited.reverse()
 828              self.candidates.extend(self.order(children_not_visited))
 829              #if T and children_not_visited:
 830              #    T.write(self.trace_message('     adding to candidates: %s' % map(str, children_not_visited)))
 831              #    T.write(self.trace_message('     candidates now: %s\n' % map(str, self.candidates)))
 832  
 833              # Skip this node if any of its children have failed.
 834              #
 835              # This catches the case where we're descending a top-level
 836              # target and one of our children failed while trying to be
 837              # built by a *previous* descent of an earlier top-level
 838              # target.
 839              #
 840              # It can also occur if a node is reused in multiple
 841              # targets.  The first descent reaches it through one
 842              # target; a later descent reaches it through the other.
 843              #
 844              # Note that we can only have failed_children if the
 845              # --keep-going flag was used, because without it the build
 846              # will stop before diving into the other branch.
 847              #
 848              # Note that even if one of the children fails, we have
 849              # still added the other children to the list of candidate
 850              # nodes so the build keeps going (--keep-going).
 851              if children_failed:
 852                  for n in executor.get_action_targets():
 853                      n.set_state(NODE_FAILED)
 854  
 855                  if S: S.child_failed = S.child_failed + 1
 856                  if T: T.write(self.trace_message('****** %s\n' % self.trace_node(node)))
 857                  continue
 858  
 859              if children_not_ready:
 860                  for child in children_not_ready:
 861                      # We're waiting on one or more derived targets
 862                      # that have not yet finished building.
 863                      if S: S.not_built = S.not_built + 1
 864  
 865                      # Add this node to the waiting parents lists of
 866                      # anything we're waiting on, with a reference
 867                      # count so we can be put back on the list for
 868                      # re-evaluation when they've all finished.
 869                      node.ref_count = node.ref_count + child.add_to_waiting_parents(node)
 870                      if T: T.write(self.trace_message(u'     adjusted ref count: %s, child %s' %
 871                                    (self.trace_node(node), repr(str(child)))))
 872  
 873                  if T:
 874                      for pc in children_pending:
 875                          T.write(self.trace_message('       adding %s to the pending children set\n' %
 876                                  self.trace_node(pc)))
 877                  self.pending_children = self.pending_children | children_pending
 878  
 879                  continue
 880  
 881              # Skip this node if it has side-effects that are
 882              # currently being built:
 883              wait_side_effects = False
 884              for se in executor.get_action_side_effects():
 885                  if se.get_state() == NODE_EXECUTING:
 886                      se.add_to_waiting_s_e(node)
 887                      wait_side_effects = True
 888  
 889              if wait_side_effects:
 890                  if S: S.side_effects = S.side_effects + 1
 891                  continue
 892  
 893              # The default when we've gotten through all of the checks above:
 894              # this node is ready to be built.
 895              if S: S.build = S.build + 1
 896              if T: T.write(self.trace_message(u'Evaluating %s\n' %
 897                                               self.trace_node(node)))
 898  
 899              # For debugging only:
 900              #
 901              # try:
 902              #     self._validate_pending_children()
 903              # except:
 904              #     self.ready_exc = sys.exc_info()
 905              #     return node
 906  
 907              return node
 908  
 909          return None
910
911 - def next_task(self):
912 """ 913 Returns the next task to be executed. 914 915 This simply asks for the next Node to be evaluated, and then wraps 916 it in the specific Task subclass with which we were initialized. 917 """ 918 node = self._find_next_ready_node() 919 920 if node is None: 921 return None 922 923 tlist = node.get_executor().get_all_targets() 924 925 task = self.tasker(self, tlist, node in self.original_top, node) 926 try: 927 task.make_ready() 928 except: 929 # We had a problem just trying to get this task ready (like 930 # a child couldn't be linked in to a VariantDir when deciding 931 # whether this node is current). Arrange to raise the 932 # exception when the Task is "executed." 933 self.ready_exc = sys.exc_info() 934 935 if self.ready_exc: 936 task.exception_set(self.ready_exc) 937 938 self.ready_exc = None 939 940 return task
941
942 - def will_not_build(self, nodes, node_func=lambda n: None):
943 """ 944 Perform clean-up about nodes that will never be built. Invokes 945 a user defined function on all of these nodes (including all 946 of their parents). 947 """ 948 949 T = self.trace 950 951 pending_children = self.pending_children 952 953 to_visit = set(nodes) 954 pending_children = pending_children - to_visit 955 956 if T: 957 for n in nodes: 958 T.write(self.trace_message(' removing node %s from the pending children set\n' % 959 self.trace_node(n))) 960 try: 961 while len(to_visit): 962 node = to_visit.pop() 963 node_func(node) 964 965 # Prune recursion by flushing the waiting children 966 # list immediately. 967 parents = node.waiting_parents 968 node.waiting_parents = set() 969 970 to_visit = to_visit | parents 971 pending_children = pending_children - parents 972 973 for p in parents: 974 p.ref_count = p.ref_count - 1 975 if T: T.write(self.trace_message(' removing parent %s from the pending children set\n' % 976 self.trace_node(p))) 977 except KeyError: 978 # The container to_visit has been emptied. 979 pass 980 981 # We have the stick back the pending_children list into the 982 # taskmaster because the python 1.5.2 compatibility does not 983 # allow us to use in-place updates 984 self.pending_children = pending_children
985
986 - def stop(self):
987 """ 988 Stops the current build completely. 989 """ 990 self.next_candidate = self.no_next_candidate
991
992 - def cleanup(self):
993 """ 994 Check for dependency cycles. 995 """ 996 if not self.pending_children: 997 return 998 999 nclist = [(n, find_cycle([n], set())) for n in self.pending_children] 1000 1001 genuine_cycles = [ 1002 node for node,cycle in nclist 1003 if cycle or node.get_state() != NODE_EXECUTED 1004 ] 1005 if not genuine_cycles: 1006 # All of the "cycles" found were single nodes in EXECUTED state, 1007 # which is to say, they really weren't cycles. Just return. 1008 return 1009 1010 desc = 'Found dependency cycle(s):\n' 1011 for node, cycle in nclist: 1012 if cycle: 1013 desc = desc + " " + " -> ".join(map(str, cycle)) + "\n" 1014 else: 1015 desc = desc + \ 1016 " Internal Error: no cycle found for node %s (%s) in state %s\n" % \ 1017 (node, repr(node), StateString[node.get_state()]) 1018 1019 raise SCons.Errors.UserError(desc)
1020  
1021  # Local Variables:
1022  # tab-width:4
1023  # indent-tabs-mode:nil
1024  # End:
1025  # vim: set expandtab tabstop=4 shiftwidth=4:
1026  
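As the module docstring says, a wrapping interface drives the build by repeatedly asking the Taskmaster for tasks and calling the Task lifecycle methods. The serial sketch below illustrates that protocol only; it is not the real SCons job runner (which adds parallel scheduling, interrupt handling, and richer error reporting), and it assumes top_level_nodes are already-constructed SCons Node objects:

# Minimal, serial sketch of the Taskmaster/Task protocol described in the
# module docstring.  Error handling is intentionally bare-bones.
import SCons.Taskmaster

def run_serial(top_level_nodes):
    tm = SCons.Taskmaster.Taskmaster(top_level_nodes)
    while True:
        task = tm.next_task()
        if task is None:
            break
        try:
            task.prepare()
            if task.needs_execute():
                task.execute()
        except Exception:
            task.exception_set()   # record it for controlled re-raising
            task.failed()          # default Task behavior: stop the build
        else:
            task.executed()
        task.postprocess()
    tm.cleanup()                   # raises UserError if cycles remain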