Package SCons :: Module Taskmaster

Source Code for Module SCons.Taskmaster

   1  # 
   2  # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation 
   3  # 
   4  # Permission is hereby granted, free of charge, to any person obtaining 
   5  # a copy of this software and associated documentation files (the 
   6  # "Software"), to deal in the Software without restriction, including 
   7  # without limitation the rights to use, copy, modify, merge, publish, 
   8  # distribute, sublicense, and/or sell copies of the Software, and to 
   9  # permit persons to whom the Software is furnished to do so, subject to 
  10  # the following conditions: 
  11  # 
  12  # The above copyright notice and this permission notice shall be included 
  13  # in all copies or substantial portions of the Software. 
  14  # 
  15  # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY 
  16  # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE 
  17  # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 
  18  # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE 
  19  # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 
  20  # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION 
  21  # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
  22  # 
  23   
  24  __doc__ = """ 
  25  Generic Taskmaster module for the SCons build engine. 
  26   
  27  This module contains the primary interface(s) between a wrapping user 
  28  interface and the SCons build engine.  There are two key classes here: 
  29   
  30      Taskmaster 
  31          This is the main engine for walking the dependency graph and 
  32          calling things to decide what does or doesn't need to be built. 
  33   
  34      Task 
  35          This is the base class for allowing a wrapping interface to 
  36          decide what does or doesn't actually need to be done.  The 
  37          intention is for a wrapping interface to subclass this as 
  38          appropriate for different types of behavior it may need. 
  39   
  40          The canonical example is the SCons native Python interface, 
  41          which has Task subclasses that handle its specific behavior, 
  42          like printing "`foo' is up to date" when a top-level target 
  43          doesn't need to be built, and handling the -c option by removing 
  44          targets as its "build" action.  There is also a separate subclass 
  45          for suppressing this output when the -q option is used. 
  46   
  47          The Taskmaster instantiates a Task object for each (set of) 
  48          target(s) that it decides need to be evaluated and/or built. 
  49  """ 
  50   
  51  __revision__ = "src/engine/SCons/Taskmaster.py 5110 2010/07/25 16:14:38 bdeegan" 
  52   
  53  from itertools import chain 
  54  import operator 
  55  import string 
  56  import sys 
  57  import traceback 
  58   
  59  import SCons.Errors 
  60  import SCons.Node 
  61  import SCons.Warnings 
  62   
  63  StateString = SCons.Node.StateString 
  64  NODE_NO_STATE = SCons.Node.no_state 
  65  NODE_PENDING = SCons.Node.pending 
  66  NODE_EXECUTING = SCons.Node.executing 
  67  NODE_UP_TO_DATE = SCons.Node.up_to_date 
  68  NODE_EXECUTED = SCons.Node.executed 
  69  NODE_FAILED = SCons.Node.failed 
  70   
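The DAG walk below relies on these state values being small integers that increase roughly in the order a Node moves through them, which is why _find_next_ready_node() can use comparisons such as state > NODE_PENDING ("already evaluated") and childstate <= NODE_EXECUTING ("not yet finished"). A minimal sketch of that idiom, using stand-in values assumed to mirror the ordering of the SCons.Node constants rather than the real definitions:

    # Hypothetical stand-ins: no_state < pending < executing < up_to_date
    # < executed < failed, as the comparisons in this module assume.
    NO_STATE, PENDING, EXECUTING, UP_TO_DATE, EXECUTED, FAILED = range(6)

    def already_evaluated(state):
        return state > PENDING           # mirrors "state > NODE_PENDING"

    def not_yet_finished(state):
        return state <= EXECUTING        # mirrors "childstate <= NODE_EXECUTING"

    assert already_evaluated(UP_TO_DATE) and not already_evaluated(NO_STATE)
    assert not_yet_finished(PENDING) and not not_yet_finished(EXECUTED)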
  71   
  72  # A subsystem for recording stats about how different Nodes are handled by 
  73  # the main Taskmaster loop.  There's no external control here (no need for 
  74  # a --debug= option); enable it by changing the value of CollectStats. 
  75   
  76  CollectStats = None 
  77   
   78  class Stats:
   79      """
   80      A simple class for holding statistics about the disposition of a
   81      Node by the Taskmaster.  If we're collecting statistics, each Node
   82      processed by the Taskmaster gets one of these attached, in which case
   83      the Taskmaster records its decision each time it processes the Node.
   84      (Ideally, that's just once per Node.)
   85      """
   86      def __init__(self):
   87          """
   88          Instantiates a Taskmaster.Stats object, initializing all
   89          appropriate counters to zero.
   90          """
   91          self.considered = 0
   92          self.already_handled = 0
   93          self.problem = 0
   94          self.child_failed = 0
   95          self.not_built = 0
   96          self.side_effects = 0
   97          self.build = 0
   98
   99  StatsNodes = []
  100
  101  fmt = "%(considered)3d "\
  102        "%(already_handled)3d " \
  103        "%(problem)3d " \
  104        "%(child_failed)3d " \
  105        "%(not_built)3d " \
  106        "%(side_effects)3d " \
  107        "%(build)3d "
  108
  109  def dump_stats():
  110      StatsNodes.sort(lambda a, b: cmp(str(a), str(b)))
  111      for n in StatsNodes:
  112          print (fmt % n.stats.__dict__) + str(n)
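A minimal sketch of how this statistics subsystem is meant to be used, following the comment above: set CollectStats to a true value before the walk, let the Taskmaster attach a Stats object to each Node it considers, then call dump_stats() at the end. The StubNode class below is hypothetical scaffolding so the snippet stands alone; in a real build the Nodes and the counter updates come from the Taskmaster itself.

    import SCons.Taskmaster as TM

    TM.CollectStats = 1            # normally flipped on by hand for debugging

    class StubNode:                # hypothetical stand-in for an SCons Node
        def __init__(self, name):
            self.name = name
            self.stats = TM.Stats()
        def __str__(self):
            return self.name

    # In a real build the Taskmaster increments these counters itself.
    n = StubNode('foo.o')
    n.stats.considered = 2
    n.stats.build = 1
    TM.StatsNodes.append(n)

    TM.dump_stats()                # one formatted line of counters per Node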
  113
  114
  115
  116  class Task:
  117      """
  118      Default SCons build engine task.
  119
  120      This controls the interaction of the actual building of a node
  121      and the rest of the engine.
  122
  123      This is expected to handle all of the normally-customizable
  124      aspects of controlling a build, so any given application
  125      *should* be able to do what it wants by sub-classing this
  126      class and overriding methods as appropriate.  If an application
  127      needs to customize something by sub-classing Taskmaster (or
  128      some other build engine class), we should first try to migrate
  129      that functionality into this class.
  130
  131      Note that it's generally a good idea for sub-classes to call
  132      these methods explicitly to update state, etc., rather than
  133      roll their own interaction with Taskmaster from scratch.
  134      """
  135      def __init__(self, tm, targets, top, node):
  136          self.tm = tm
  137          self.targets = targets
  138          self.top = top
  139          self.node = node
  140          self.exc_clear()
141
  142      def trace_message(self, method, node, description='node'):
  143          fmt = '%-20s %s %s\n'
  144          return fmt % (method + ':', description, self.tm.trace_node(node))
145
  146      def display(self, message):
  147          """
  148          Hook to allow the calling interface to display a message.
  149
  150          This hook gets called as part of preparing a task for execution
  151          (that is, a Node to be built).  As part of figuring out what Node
  152          should be built next, the actual target list may be altered,
  153          along with a message describing the alteration.  The calling
  154          interface can subclass Task and provide a concrete implementation
  155          of this method to see those messages.
  156          """
  157          pass
158
  159      def prepare(self):
  160          """
  161          Called just before the task is executed.
  162
  163          This is mainly intended to give the target Nodes a chance to
  164          unlink underlying files and make all necessary directories before
  165          the Action is actually called to build the targets.
  166          """
  167          T = self.tm.trace
  168          if T: T.write(self.trace_message('Task.prepare()', self.node))
  169
  170          # Now that it's the appropriate time, give the TaskMaster a
  171          # chance to raise any exceptions it encountered while preparing
  172          # this task.
  173          self.exception_raise()
  174
  175          if self.tm.message:
  176              self.display(self.tm.message)
  177              self.tm.message = None
  178
  179          # Let the targets take care of any necessary preparations.
  180          # This includes verifying that all of the necessary sources
  181          # and dependencies exist, removing the target file(s), etc.
  182          #
  183          # As of April 2008, the get_executor().prepare() method makes
  184          # sure that all of the aggregate sources necessary to build this
  185          # Task's target(s) exist in one up-front check.  The individual
  186          # target t.prepare() methods check that each target's explicit
  187          # or implicit dependencies exist, and also initialize the
  188          # .sconsign info.
  189          executor = self.targets[0].get_executor()
  190          executor.prepare()
  191          for t in executor.get_action_targets():
  192              t.prepare()
  193              for s in t.side_effects:
  194                  s.prepare()
195
  196      def get_target(self):
  197          """Fetch the target being built or updated by this task.
  198          """
  199          return self.node
200
  201      def needs_execute(self):
  202          # TODO(deprecate): "return True" is the old default behavior;
  203          # change it to NotImplementedError (after running through the
  204          # Deprecation Cycle) so the desired behavior is explicitly
  205          # determined by which concrete subclass is used.
  206          #raise NotImplementedError
  207          msg = ('Direct use of the Taskmaster.Task class will be deprecated\n'
  208                 + '\tin a future release.')
  209          SCons.Warnings.warn(SCons.Warnings.TaskmasterNeedsExecuteWarning, msg)
  210          return True
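The warning above says not to use Task directly: a wrapping interface is expected to hand the Taskmaster a concrete subclass (see AlwaysTask and OutOfDateTask later in this file) so the needs_execute() decision is explicit. A hypothetical sketch of such a subclass, which also overrides the display() hook documented above:

    import sys
    import SCons.Taskmaster

    class VerboseOutOfDateTask(SCons.Taskmaster.OutOfDateTask):
        # Hypothetical subclass: inherits an explicit needs_execute() from
        # OutOfDateTask (defined below), so the warning above never fires,
        # and adds a display() hook to surface the Taskmaster's messages.
        def display(self, message):
            sys.stderr.write('taskmaster: %s\n' % message)

A wrapping interface would pass this class as the tasker argument when constructing the Taskmaster; next_task() then instantiates it for each batch of ready targets.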
211
  212      def execute(self):
  213          """
  214          Called to execute the task.
  215
  216          This method is called from multiple threads in a parallel build,
  217          so only do thread safe stuff here.  Do thread unsafe stuff in
  218          prepare(), executed() or failed().
  219          """
  220          T = self.tm.trace
  221          if T: T.write(self.trace_message('Task.execute()', self.node))
  222
  223          try:
  224              everything_was_cached = 1
  225              for t in self.targets:
  226                  if t.retrieve_from_cache():
  227                      # Call the .built() method without calling the
  228                      # .push_to_cache() method, since we just got the
  229                      # target from the cache and don't need to push
  230                      # it back there.
  231                      t.set_state(NODE_EXECUTED)
  232                      t.built()
  233                  else:
  234                      everything_was_cached = 0
  235                      break
  236              if not everything_was_cached:
  237                  self.targets[0].build()
  238          except SystemExit:
  239              exc_value = sys.exc_info()[1]
  240              raise SCons.Errors.ExplicitExit(self.targets[0], exc_value.code)
  241          except SCons.Errors.UserError:
  242              raise
  243          except SCons.Errors.BuildError:
  244              raise
  245          except Exception, e:
  246              buildError = SCons.Errors.convert_to_BuildError(e)
  247              buildError.node = self.targets[0]
  248              buildError.exc_info = sys.exc_info()
  249              raise buildError
250
  251      def executed_without_callbacks(self):
  252          """
  253          Called when the task has been successfully executed
  254          and the Taskmaster instance doesn't want to call
  255          the Node's callback methods.
  256          """
  257          T = self.tm.trace
  258          if T: T.write(self.trace_message('Task.executed_without_callbacks()',
  259                                           self.node))
  260
  261          for t in self.targets:
  262              if t.get_state() == NODE_EXECUTING:
  263                  for side_effect in t.side_effects:
  264                      side_effect.set_state(NODE_NO_STATE)
  265                  t.set_state(NODE_EXECUTED)
266
  267      def executed_with_callbacks(self):
  268          """
  269          Called when the task has been successfully executed and
  270          the Taskmaster instance wants to call the Node's callback
  271          methods.
  272
  273          This may have been a do-nothing operation (to preserve build
  274          order), so we must check the node's state before deciding whether
  275          it was "built", in which case we call the appropriate Node method.
  276          In any event, we always call "visited()", which will handle any
  277          post-visit actions that must take place regardless of whether
  278          or not the target was an actual built target or a source Node.
  279          """
  280          T = self.tm.trace
  281          if T: T.write(self.trace_message('Task.executed_with_callbacks()',
  282                                           self.node))
  283
  284          for t in self.targets:
  285              if t.get_state() == NODE_EXECUTING:
  286                  for side_effect in t.side_effects:
  287                      side_effect.set_state(NODE_NO_STATE)
  288                  t.set_state(NODE_EXECUTED)
  289                  t.push_to_cache()
  290                  t.built()
  291              t.visited()
  292
  293      executed = executed_with_callbacks
  294
  295      def failed(self):
  296          """
  297          Default action when a task fails:  stop the build.
  298
  299          Note: Although this function is normally invoked on nodes in
  300          the executing state, it might also be invoked on up-to-date
  301          nodes when using Configure().
  302          """
  303          self.fail_stop()
304
  305      def fail_stop(self):
  306          """
  307          Explicit stop-the-build failure.
  308
  309          This sets failure status on the target nodes and all of
  310          their dependent parent nodes.
  311
  312          Note: Although this function is normally invoked on nodes in
  313          the executing state, it might also be invoked on up-to-date
  314          nodes when using Configure().
  315          """
  316          T = self.tm.trace
  317          if T: T.write(self.trace_message('Task.fail_stop()', self.node))
  318
  319          # Invoke will_not_build() to clean-up the pending children
  320          # list.
  321          self.tm.will_not_build(self.targets, lambda n: n.set_state(NODE_FAILED))
  322
  323          # Tell the taskmaster to not start any new tasks
  324          self.tm.stop()
  325
  326          # We're stopping because of a build failure, but give the
  327          # calling Task class a chance to postprocess() the top-level
  328          # target under which the build failure occurred.
  329          self.targets = [self.tm.current_top]
  330          self.top = 1
331
  332      def fail_continue(self):
  333          """
  334          Explicit continue-the-build failure.
  335
  336          This sets failure status on the target nodes and all of
  337          their dependent parent nodes.
  338
  339          Note: Although this function is normally invoked on nodes in
  340          the executing state, it might also be invoked on up-to-date
  341          nodes when using Configure().
  342          """
  343          T = self.tm.trace
  344          if T: T.write(self.trace_message('Task.fail_continue()', self.node))
  345
  346          self.tm.will_not_build(self.targets, lambda n: n.set_state(NODE_FAILED))
347
  348      def make_ready_all(self):
  349          """
  350          Marks all targets in a task ready for execution.
  351
  352          This is used when the interface needs every target Node to be
  353          visited--the canonical example being the "scons -c" option.
  354          """
  355          T = self.tm.trace
  356          if T: T.write(self.trace_message('Task.make_ready_all()', self.node))
  357
  358          self.out_of_date = self.targets[:]
  359          for t in self.targets:
  360              t.disambiguate().set_state(NODE_EXECUTING)
  361              for s in t.side_effects:
  362                  # add disambiguate here to mirror the call on targets above
  363                  s.disambiguate().set_state(NODE_EXECUTING)
364
  365      def make_ready_current(self):
  366          """
  367          Marks all targets in a task ready for execution if any target
  368          is not current.
  369
  370          This is the default behavior for building only what's necessary.
  371          """
  372          T = self.tm.trace
  373          if T: T.write(self.trace_message('Task.make_ready_current()',
  374                                           self.node))
  375
  376          self.out_of_date = []
  377          needs_executing = False
  378          for t in self.targets:
  379              try:
  380                  t.disambiguate().make_ready()
  381                  is_up_to_date = not t.has_builder() or \
  382                                  (not t.always_build and t.is_up_to_date())
  383              except EnvironmentError, e:
  384                  raise SCons.Errors.BuildError(node=t, errstr=e.strerror, filename=e.filename)
  385
  386              if not is_up_to_date:
  387                  self.out_of_date.append(t)
  388                  needs_executing = True
  389
  390          if needs_executing:
  391              for t in self.targets:
  392                  t.set_state(NODE_EXECUTING)
  393                  for s in t.side_effects:
  394                      # add disambiguate here to mirror the call on targets in first loop above
  395                      s.disambiguate().set_state(NODE_EXECUTING)
  396          else:
  397              for t in self.targets:
  398                  # We must invoke visited() to ensure that the node
  399                  # information has been computed before allowing the
  400                  # parent nodes to execute. (That could occur in a
  401                  # parallel build...)
  402                  t.visited()
  403                  t.set_state(NODE_UP_TO_DATE)
  404
  405      make_ready = make_ready_current
  406
  407      def postprocess(self):
  408          """
  409          Post-processes a task after it's been executed.
  410
  411          This examines all the targets just built (or not, we don't care
  412          if the build was successful, or even if there was no build
  413          because everything was up-to-date) to see if they have any
  414          waiting parent Nodes, or Nodes waiting on a common side effect,
  415          that can be put back on the candidates list.
  416          """
  417          T = self.tm.trace
  418          if T: T.write(self.trace_message('Task.postprocess()', self.node))
  419
  420          # We may have built multiple targets, some of which may have
  421          # common parents waiting for this build.  Count up how many
  422          # targets each parent was waiting for so we can subtract the
  423          # values later, and so we *don't* put waiting side-effect Nodes
  424          # back on the candidates list if the Node is also a waiting
  425          # parent.
  426
  427          targets = set(self.targets)
  428
  429          pending_children = self.tm.pending_children
  430          parents = {}
  431          for t in targets:
  432              # A node can only be in the pending_children set if it has
  433              # some waiting_parents.
  434              if t.waiting_parents:
  435                  if T: T.write(self.trace_message('Task.postprocess()',
  436                                                   t,
  437                                                   'removing'))
  438                  pending_children.discard(t)
  439                  for p in t.waiting_parents:
  440                      parents[p] = parents.get(p, 0) + 1
  441
  442          for t in targets:
  443              for s in t.side_effects:
  444                  if s.get_state() == NODE_EXECUTING:
  445                      s.set_state(NODE_NO_STATE)
  446                      for p in s.waiting_parents:
  447                          parents[p] = parents.get(p, 0) + 1
  448                  for p in s.waiting_s_e:
  449                      if p.ref_count == 0:
  450                          self.tm.candidates.append(p)
  451
  452          for p, subtract in parents.items():
  453              p.ref_count = p.ref_count - subtract
  454              if T: T.write(self.trace_message('Task.postprocess()',
  455                                               p,
  456                                               'adjusted parent ref count'))
  457              if p.ref_count == 0:
  458                  self.tm.candidates.append(p)
  459
  460          for t in targets:
  461              t.postprocess()
  462
  463      # Exception handling subsystem.
  464      #
  465      # Exceptions that occur while walking the DAG or examining Nodes
  466      # must be raised, but must be raised at an appropriate time and in
  467      # a controlled manner so we can, if necessary, recover gracefully,
  468      # possibly write out signature information for Nodes we've updated,
  469      # etc.  This is done by having the Taskmaster tell us about the
  470      # exception, and letting the Task raise it at the appropriate time.
  471
  472      def exc_info(self):
  473          """
  474          Returns info about a recorded exception.
  475          """
  476          return self.exception
477
  478      def exc_clear(self):
  479          """
  480          Clears any recorded exception.
  481
  482          This also changes the "exception_raise" attribute to point
  483          to the appropriate do-nothing method.
  484          """
  485          self.exception = (None, None, None)
  486          self.exception_raise = self._no_exception_to_raise
487
  488      def exception_set(self, exception=None):
  489          """
  490          Records an exception to be raised at the appropriate time.
  491
  492          This also changes the "exception_raise" attribute to point
  493          to the method that will, in fact, raise the exception.
  494          """
  495          if not exception:
  496              exception = sys.exc_info()
  497          self.exception = exception
  498          self.exception_raise = self._exception_raise
499
  500      def _no_exception_to_raise(self):
  501          pass
502
  503      def _exception_raise(self):
  504          """
  505          Raises a pending exception that was recorded while getting a
  506          Task ready for execution.
  507          """
  508          exc = self.exc_info()[:]
  509          try:
  510              exc_type, exc_value, exc_traceback = exc
  511          except ValueError:
  512              exc_type, exc_value = exc
  513              exc_traceback = None
  514          raise exc_type, exc_value, exc_traceback
515
  516  class AlwaysTask(Task):
  517      def needs_execute(self):
  518          """
  519          Always returns True (indicating this Task should always
  520          be executed).
  521
  522          Subclasses that need this behavior (as opposed to the default
  523          of only executing Nodes that are out of date w.r.t. their
  524          dependencies) can use this as follows:
  525
  526              class MyTaskSubclass(SCons.Taskmaster.Task):
  527                  needs_execute = SCons.Taskmaster.AlwaysTask.needs_execute
  528          """
  529          return True
530
  531  class OutOfDateTask(Task):
  532      def needs_execute(self):
  533          """
  534          Returns True (indicating this Task should be executed) if this
  535          Task's target state indicates it needs executing, which has
  536          already been determined by an earlier up-to-date check.
  537          """
  538          return self.targets[0].get_state() == SCons.Node.executing
  539
  540
  541  def find_cycle(stack, visited):
  542      if stack[-1] in visited:
  543          return None
  544      visited.add(stack[-1])
  545      for n in stack[-1].waiting_parents:
  546          stack.append(n)
  547          if stack[0] == stack[-1]:
  548              return stack
  549          if find_cycle(stack, visited):
  550              return stack
  551          stack.pop()
  552      return None
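find_cycle() walks waiting_parents links depth-first and returns the stack once it gets back to the node it started from. A small sketch with hypothetical stand-in objects (the function only reads the waiting_parents attribute):

    from SCons.Taskmaster import find_cycle

    class FakeNode:
        # Hypothetical stand-in; only .waiting_parents matters here.
        def __init__(self, name):
            self.name = name
            self.waiting_parents = set()
        def __str__(self):
            return self.name

    a, b, c = FakeNode('a'), FakeNode('b'), FakeNode('c')
    a.waiting_parents.add(b)     # b is waiting on a ...
    b.waiting_parents.add(c)     # ... c is waiting on b ...
    c.waiting_parents.add(a)     # ... and a is waiting on c: a cycle.

    print(list(map(str, find_cycle([a], set()))))   # ['a', 'b', 'c', 'a']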
  553
  554
  555  class Taskmaster:
  556      """
  557      The Taskmaster for walking the dependency DAG.
  558      """
  559
  560      def __init__(self, targets=[], tasker=None, order=None, trace=None):
  561          self.original_top = targets
  562          self.top_targets_left = targets[:]
  563          self.top_targets_left.reverse()
  564          self.candidates = []
  565          if tasker is None:
  566              tasker = OutOfDateTask
  567          self.tasker = tasker
  568          if not order:
  569              order = lambda l: l
  570          self.order = order
  571          self.message = None
  572          self.trace = trace
  573          self.next_candidate = self.find_next_candidate
  574          self.pending_children = set()
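The order argument is a callable that gets to reorder each batch of candidate children before they are pushed onto the stack (see the self.order(...) calls below); the default leaves the list untouched. A hypothetical example that randomizes the walk, similar in spirit to what the native interface does for its --random option:

    import random
    import SCons.Taskmaster

    def random_order(dependencies):
        # Shuffle in place and hand the list back; the Taskmaster simply
        # extends its candidate stack with whatever this returns.
        random.shuffle(dependencies)
        return dependencies

    # 'top_nodes' would be real SCons Nodes in practice:
    # tm = SCons.Taskmaster.Taskmaster(top_nodes, order=random_order)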
575
  576      def find_next_candidate(self):
  577          """
  578          Returns the next candidate Node for (potential) evaluation.
  579
  580          The candidate list (really a stack) initially consists of all of
  581          the top-level (command line) targets provided when the Taskmaster
  582          was initialized.  While we walk the DAG, visiting Nodes, all the
  583          children that haven't finished processing get pushed on to the
  584          candidate list.  Each child can then be popped and examined in
  585          turn for whether *their* children are all up-to-date, in which
  586          case a Task will be created for their actual evaluation and
  587          potential building.
  588
  589          Here is where we also allow candidate Nodes to alter the list of
  590          Nodes that should be examined.  This is used, for example, when
  591          invoking SCons in a source directory.  A source directory Node can
  592          return its corresponding build directory Node, essentially saying,
  593          "Hey, you really need to build this thing over here instead."
  594          """
  595          try:
  596              return self.candidates.pop()
  597          except IndexError:
  598              pass
  599          try:
  600              node = self.top_targets_left.pop()
  601          except IndexError:
  602              return None
  603          self.current_top = node
  604          alt, message = node.alter_targets()
  605          if alt:
  606              self.message = message
  607              self.candidates.append(node)
  608              self.candidates.extend(self.order(alt))
  609              node = self.candidates.pop()
  610          return node
611
  612      def no_next_candidate(self):
  613          """
  614          Stops Taskmaster processing by not returning a next candidate.
  615
  616          Note that we have to clean up the Taskmaster candidate list
  617          because the cycle detection depends on the fact that all nodes
  618          have been processed somehow.
  619          """
  620          while self.candidates:
  621              candidates = self.candidates
  622              self.candidates = []
  623              self.will_not_build(candidates)
  624          return None
625
  626      def _validate_pending_children(self):
  627          """
  628          Validate the content of the pending_children set.  Assert if an
  629          internal error is found.
  630
  631          This function is used strictly for debugging the taskmaster by
  632          checking that no invariants are violated.  It is not used in
  633          normal operation.
  634
  635          The pending_children set is used to detect cycles in the
  636          dependency graph.  We call a "pending child" a child that is
  637          found in the "pending" state when checking the dependencies of
  638          its parent node.
  639
  640          A pending child can occur when the Taskmaster completes a loop
  641          through a cycle.  For example, let's imagine a graph made of
  642          three nodes (A, B and C) forming a cycle.  The evaluation starts
  643          at node A.  The taskmaster first considers whether node A's
  644          child B is up-to-date.  Then, recursively, node B needs to
  645          check whether node C is up-to-date.  This leaves us with a
  646          dependency graph looking like:
  647
  648                                        Next candidate \
  649                                                        \
  650          Node A (Pending) --> Node B (Pending) --> Node C (NoState)
  651                  ^                                        |
  652                  |                                        |
  653                  +----------------------------------------+
  654
  655          Now, when the Taskmaster examines Node C's child Node A,
  656          it finds that Node A is in the "pending" state.  Therefore,
  657          Node A is a pending child of node C.
  658
  659          Pending children indicate that the Taskmaster has potentially
  660          looped back through a cycle.  We say potentially because it could
  661          also occur when a DAG is evaluated in parallel.  For example,
  662          consider the following graph:
  663
  664
  665          Node A (Pending) --> Node B (Pending) --> Node C (Pending) --> ...
  666                  |                                        ^
  667                  |                                        |
  668                  +----------> Node D (NoState) -----------+
  669                                     /
  670                    Next candidate /
  671
  672          The Taskmaster first evaluates the nodes A, B, and C and
  673          starts building some children of node C.  Assuming that the
  674          maximum parallel level has not been reached, the Taskmaster
  675          will examine Node D.  It will find that Node C is a pending
  676          child of Node D.
  677
  678          In summary, evaluating a graph with a cycle will always
  679          involve a pending child at one point.  A pending child might
  680          indicate either a cycle or a diamond-shaped DAG.  Only a
  681          fraction of the nodes ends up being a "pending child" of
  682          another node.  This keeps the pending_children set small in
  683          practice.
  684
  685          We can differentiate between the two cases if we wait until
  686          the end of the build.  At this point, all the pending children
  687          nodes due to a diamond-shaped DAG will have been properly
  688          built (or will have failed to build).  But the pending
  689          children involved in a cycle will still be in the pending
  690          state.
  691
  692          The taskmaster removes nodes from the pending_children set as
  693          soon as a pending_children node moves out of the pending
  694          state.  This also helps to keep the pending_children set small.
  695          """
  696
  697          for n in self.pending_children:
  698              assert n.state in (NODE_PENDING, NODE_EXECUTING), \
  699                  (str(n), StateString[n.state])
  700              assert len(n.waiting_parents) != 0, (str(n), len(n.waiting_parents))
  701              for p in n.waiting_parents:
  702                  assert p.ref_count > 0, (str(n), str(p), p.ref_count)
  703
  704
  705      def trace_message(self, message):
  706          return 'Taskmaster: %s\n' % message
707
  708      def trace_node(self, node):
  709          return '<%-10s %-3s %s>' % (StateString[node.get_state()],
  710                                      node.ref_count,
  711                                      repr(str(node)))
712
  713      def _find_next_ready_node(self):
  714          """
  715          Finds the next node that is ready to be built.
  716
  717          This is *the* main guts of the DAG walk.  We loop through the
  718          list of candidates, looking for something that has no un-built
  719          children (i.e., that is a leaf Node or has dependencies that are
  720          all leaf Nodes or up-to-date).  Candidate Nodes are re-scanned
  721          (both the target Node itself and its sources, which are always
  722          scanned in the context of a given target) to discover implicit
  723          dependencies.  A Node that must wait for some children to be
  724          built will be put back on the candidates list after the children
  725          have finished building.  A Node that has been put back on the
  726          candidates list in this way may have itself (or its sources)
  727          re-scanned, in order to handle generated header files (e.g.) and
  728          the implicit dependencies therein.
  729
  730          Note that this method does not do any signature calculation or
  731          up-to-date check itself.  All of that is handled by the Task
  732          class.  This is purely concerned with the dependency graph walk.
  733          """
  734
  735          self.ready_exc = None
  736
  737          T = self.trace
  738          if T: T.write('\n' + self.trace_message('Looking for a node to evaluate'))
  739
  740          while 1:
  741              node = self.next_candidate()
  742              if node is None:
  743                  if T: T.write(self.trace_message('No candidate anymore.') + '\n')
  744                  return None
  745
  746              node = node.disambiguate()
  747              state = node.get_state()
  748
  749              # For debugging only:
  750              #
  751              # try:
  752              #     self._validate_pending_children()
  753              # except:
  754              #     self.ready_exc = sys.exc_info()
  755              #     return node
  756
  757              if CollectStats:
  758                  if not hasattr(node, 'stats'):
  759                      node.stats = Stats()
  760                      StatsNodes.append(node)
  761                  S = node.stats
  762                  S.considered = S.considered + 1
  763              else:
  764                  S = None
  765
  766              if T: T.write(self.trace_message('    Considering node %s and its children:' % self.trace_node(node)))
  767
  768              if state == NODE_NO_STATE:
  769                  # Mark this node as being on the execution stack:
  770                  node.set_state(NODE_PENDING)
  771              elif state > NODE_PENDING:
  772                  # Skip this node if it has already been evaluated:
  773                  if S: S.already_handled = S.already_handled + 1
  774                  if T: T.write(self.trace_message('       already handled (executed)'))
  775                  continue
  776
  777              executor = node.get_executor()
  778
  779              try:
  780                  children = executor.get_all_children()
  781              except SystemExit:
  782                  exc_value = sys.exc_info()[1]
  783                  e = SCons.Errors.ExplicitExit(node, exc_value.code)
  784                  self.ready_exc = (SCons.Errors.ExplicitExit, e)
  785                  if T: T.write(self.trace_message('       SystemExit'))
  786                  return node
  787              except Exception, e:
  788                  # We had a problem just trying to figure out the
  789                  # children (like a child couldn't be linked in to a
  790                  # VariantDir, or a Scanner threw something).  Arrange to
  791                  # raise the exception when the Task is "executed."
  792                  self.ready_exc = sys.exc_info()
  793                  if S: S.problem = S.problem + 1
  794                  if T: T.write(self.trace_message('       exception %s while scanning children.\n' % e))
  795                  return node
  796
  797              children_not_visited = []
  798              children_pending = set()
  799              children_not_ready = []
  800              children_failed = False
  801
  802              for child in chain(executor.get_all_prerequisites(), children):
  803                  childstate = child.get_state()
  804
  805                  if T: T.write(self.trace_message('       ' + self.trace_node(child)))
  806
  807                  if childstate == NODE_NO_STATE:
  808                      children_not_visited.append(child)
  809                  elif childstate == NODE_PENDING:
  810                      children_pending.add(child)
  811                  elif childstate == NODE_FAILED:
  812                      children_failed = True
  813
  814                  if childstate <= NODE_EXECUTING:
  815                      children_not_ready.append(child)
  816
  817
  818              # These nodes have not even been visited yet.  Add
  819              # them to the list so that on some next pass we can
  820              # take a stab at evaluating them (or their children).
  821              children_not_visited.reverse()
  822              self.candidates.extend(self.order(children_not_visited))
  823              #if T and children_not_visited:
  824              #    T.write(self.trace_message('     adding to candidates: %s' % map(str, children_not_visited)))
  825              #    T.write(self.trace_message('     candidates now: %s\n' % map(str, self.candidates)))
  826
  827              # Skip this node if any of its children have failed.
  828              #
  829              # This catches the case where we're descending a top-level
  830              # target and one of our children failed while trying to be
  831              # built by a *previous* descent of an earlier top-level
  832              # target.
  833              #
  834              # It can also occur if a node is reused in multiple
  835              # targets.  One first descends through one of the targets;
  836              # the next time, the node is reached through the other target.
  837              #
  838              # Note that we can only have failed_children if the
  839              # --keep-going flag was used, because without it the build
  840              # will stop before diving in the other branch.
  841              #
  842              # Note that even if one of the children fails, we still
  843              # added the other children to the list of candidate nodes
  844              # to keep on building (--keep-going).
  845              if children_failed:
  846                  for n in executor.get_action_targets():
  847                      n.set_state(NODE_FAILED)
  848
  849                  if S: S.child_failed = S.child_failed + 1
  850                  if T: T.write(self.trace_message('****** %s\n' % self.trace_node(node)))
  851                  continue
  852
  853              if children_not_ready:
  854                  for child in children_not_ready:
  855                      # We're waiting on one or more derived targets
  856                      # that have not yet finished building.
  857                      if S: S.not_built = S.not_built + 1
  858
  859                      # Add this node to the waiting parents lists of
  860                      # anything we're waiting on, with a reference
  861                      # count so we can be put back on the list for
  862                      # re-evaluation when they've all finished.
  863                      node.ref_count = node.ref_count + child.add_to_waiting_parents(node)
  864                      if T: T.write(self.trace_message('     adjusted ref count: %s, child %s' %
  865                                    (self.trace_node(node), repr(str(child)))))
  866
  867                  if T:
  868                      for pc in children_pending:
  869                          T.write(self.trace_message('       adding %s to the pending children set\n' %
  870                                  self.trace_node(pc)))
  871                  self.pending_children = self.pending_children | children_pending
  872
  873                  continue
  874
  875              # Skip this node if it has side-effects that are
  876              # currently being built:
  877              wait_side_effects = False
  878              for se in executor.get_action_side_effects():
  879                  if se.get_state() == NODE_EXECUTING:
  880                      se.add_to_waiting_s_e(node)
  881                      wait_side_effects = True
  882
  883              if wait_side_effects:
  884                  if S: S.side_effects = S.side_effects + 1
  885                  continue
  886
  887              # The default when we've gotten through all of the checks above:
  888              # this node is ready to be built.
  889              if S: S.build = S.build + 1
  890              if T: T.write(self.trace_message('Evaluating %s\n' %
  891                                               self.trace_node(node)))
  892
  893              # For debugging only:
  894              #
  895              # try:
  896              #     self._validate_pending_children()
  897              # except:
  898              #     self.ready_exc = sys.exc_info()
  899              #     return node
  900
  901              return node
  902
  903          return None
904
  905      def next_task(self):
  906          """
  907          Returns the next task to be executed.
  908
  909          This simply asks for the next Node to be evaluated, and then wraps
  910          it in the specific Task subclass with which we were initialized.
  911          """
  912          node = self._find_next_ready_node()
  913
  914          if node is None:
  915              return None
  916
  917          tlist = node.get_executor().get_all_targets()
  918
  919          task = self.tasker(self, tlist, node in self.original_top, node)
  920          try:
  921              task.make_ready()
  922          except:
  923              # We had a problem just trying to get this task ready (like
  924              # a child couldn't be linked in to a VariantDir when deciding
  925              # whether this node is current).  Arrange to raise the
  926              # exception when the Task is "executed."
  927              self.ready_exc = sys.exc_info()
  928
  929          if self.ready_exc:
  930              task.exception_set(self.ready_exc)
  931
  932          self.ready_exc = None
  933
  934          return task
935
  936      def will_not_build(self, nodes, node_func=lambda n: None):
  937          """
  938          Perform clean-up for nodes that will never be built.  Invokes
  939          a user defined function on all of these nodes (including all
  940          of their parents).
  941          """
  942
  943          T = self.trace
  944
  945          pending_children = self.pending_children
  946
  947          to_visit = set(nodes)
  948          pending_children = pending_children - to_visit
  949
  950          if T:
  951              for n in nodes:
  952                  T.write(self.trace_message('       removing node %s from the pending children set\n' %
  953                          self.trace_node(n)))
  954          try:
  955              while 1:
  956                  try:
  957                      node = to_visit.pop()
  958                  except AttributeError:
  959                      # Python 1.5.2
  960                      if len(to_visit):
  961                          node = to_visit[0]
  962                          to_visit.remove(node)
  963                      else:
  964                          break
  965
  966                  node_func(node)
  967
  968                  # Prune recursion by flushing the waiting children
  969                  # list immediately.
  970                  parents = node.waiting_parents
  971                  node.waiting_parents = set()
  972
  973                  to_visit = to_visit | parents
  974                  pending_children = pending_children - parents
  975
  976                  for p in parents:
  977                      p.ref_count = p.ref_count - 1
  978                      if T: T.write(self.trace_message('       removing parent %s from the pending children set\n' %
  979                              self.trace_node(p)))
  980          except KeyError:
  981              # The container to_visit has been emptied.
  982              pass
  983
  984          # We have to stick the pending_children set back into the
  985          # taskmaster because Python 1.5.2 compatibility does not
  986          # allow us to use in-place updates.
  987          self.pending_children = pending_children
988
  989      def stop(self):
  990          """
  991          Stops the current build completely.
  992          """
  993          self.next_candidate = self.no_next_candidate
994
  995      def cleanup(self):
  996          """
  997          Check for dependency cycles.
  998          """
  999          if not self.pending_children:
 1000              return
 1001
 1002          # TODO(1.5)
 1003          #nclist = [ (n, find_cycle([n], set())) for n in self.pending_children ]
 1004          nclist = map(lambda n: (n, find_cycle([n], set())), self.pending_children)
 1005
 1006          # TODO(1.5)
 1007          #genuine_cycles = [
 1008          #    node for node, cycle in nclist
 1009          #    if cycle or node.get_state() != NODE_EXECUTED
 1010          #]
 1011          genuine_cycles = filter(lambda t: t[1] or t[0].get_state() != NODE_EXECUTED, nclist)
 1012          if not genuine_cycles:
 1013              # All of the "cycles" found were single nodes in EXECUTED state,
 1014              # which is to say, they really weren't cycles.  Just return.
 1015              return
 1016
 1017          desc = 'Found dependency cycle(s):\n'
 1018          for node, cycle in nclist:
 1019              if cycle:
 1020                  desc = desc + "  " + string.join(map(str, cycle), " -> ") + "\n"
 1021              else:
 1022                  desc = desc + \
 1023                      "  Internal Error: no cycle found for node %s (%s) in state %s\n" % \
 1024                      (node, repr(node), StateString[node.get_state()])
 1025
 1026          raise SCons.Errors.UserError, desc
 1027
 1028  # Local Variables:
 1029  # tab-width:4
 1030  # indent-tabs-mode:nil
 1031  # End:
 1032  # vim: set expandtab tabstop=4 shiftwidth=4:
 1033