
Source Code for Module SCons.Node.FS

   1  """scons.Node.FS 
   2   
   3  File system nodes. 
   4   
   5  These Nodes represent the canonical external objects that people think 
   6  of when they think of building software: files and directories. 
   7   
   8  This holds a "default_fs" variable that should be initialized with an FS 
   9  that can be used by scripts or modules looking for the canonical default. 
  10   
  11  """ 
  12   
  13  # 
  14  # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 The SCons Foundation 
  15  # 
  16  # Permission is hereby granted, free of charge, to any person obtaining 
  17  # a copy of this software and associated documentation files (the 
  18  # "Software"), to deal in the Software without restriction, including 
  19  # without limitation the rights to use, copy, modify, merge, publish, 
  20  # distribute, sublicense, and/or sell copies of the Software, and to 
  21  # permit persons to whom the Software is furnished to do so, subject to 
  22  # the following conditions: 
  23  # 
  24  # The above copyright notice and this permission notice shall be included 
  25  # in all copies or substantial portions of the Software. 
  26  # 
  27  # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY 
  28  # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE 
  29  # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 
  30  # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE 
  31  # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 
  32  # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION 
  33  # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
  34  # 
  35   
  36  __revision__ = "src/engine/SCons/Node/FS.py 3603 2008/10/10 05:46:45 scons" 
  37   
  38  import fnmatch 
  39  from itertools import izip 
  40  import os 
  41  import os.path 
  42  import re 
  43  import shutil 
  44  import stat 
  45  import string 
  46  import sys 
  47  import time 
  48  import cStringIO 
  49   
  50  import SCons.Action 
  51  from SCons.Debug import logInstanceCreation 
  52  import SCons.Errors 
  53  import SCons.Memoize 
  54  import SCons.Node 
  55  import SCons.Node.Alias 
  56  import SCons.Subst 
  57  import SCons.Util 
  58  import SCons.Warnings 
  59   
  60  from SCons.Debug import Trace 
  61   
  62  do_store_info = True 
  63   
  64  # The max_drift value:  by default, use a cached signature value for 
  65  # any file that's been untouched for more than two days. 
  66  default_max_drift = 2*24*60*60 
  67   
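
The max_drift window is what lets SCons trust a cached content signature for files that have sat untouched, instead of re-reading their contents on every run. The value can be tuned per project; the sketch below assumes the standard SConscript-level SetOption() interface (the same value can be set with the --max-drift command-line option).

    # SConstruct sketch (hypothetical project): tune the drift window.
    SetOption('max_drift', -1)   # a negative value: never trust a cached csig,
                                 # always recompute file contents signatures
    # SetOption('max_drift', 0)  # ...or 0: always trust the cached csig,
                                 # no matter how recently the file changed
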
  68  # 
  69  # We stringify these file system Nodes a lot.  Turning a file system Node 
  70  # into a string is non-trivial, because the final string representation 
  71  # can depend on a lot of factors:  whether it's a derived target or not, 
  72  # whether it's linked to a repository or source directory, and whether 
  73  # there's duplication going on.  The normal technique for optimizing 
  74  # calculations like this is to memoize (cache) the string value, so you 
  75  # only have to do the calculation once. 
  76  # 
  77  # A number of the above factors, however, can be set after we've already 
  78  # been asked to return a string for a Node, because a Repository() or 
  79  # VariantDir() call or the like may not occur until later in SConscript 
  80  # files.  So this variable controls whether we bother trying to save 
  81  # string values for Nodes.  The wrapper interface can set this whenever 
  82  # they're done mucking with Repository and VariantDir and the other stuff, 
  83  # to let this module know it can start returning saved string values 
  84  # for Nodes. 
  85  # 
  86  Save_Strings = None 
  87   
88 -def save_strings(val):
89 global Save_Strings 90 Save_Strings = val
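
save_strings() is the hook the calling layer uses to flip this switch. The fragment below is only a sketch of that hand-off, assuming a driver that has finished reading its SConscript files (SCons' own main loop does the equivalent):

    import SCons.Node.FS

    # ... read all SConscript files, process Repository()/VariantDir() calls ...
    SCons.Node.FS.save_strings(1)   # from here on, str(node) results may be memoized
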
91 92 # 93 # Avoid unnecessary function calls by recording a Boolean value that 94 # tells us whether or not os.path.splitdrive() actually does anything 95 # on this system, and therefore whether we need to bother calling it 96 # when looking up path names in various methods below. 97 # 98 99 do_splitdrive = None 100
101 -def initialize_do_splitdrive():
102 global do_splitdrive 103 drive, path = os.path.splitdrive('X:/foo') 104 do_splitdrive = not not drive
105 106 initialize_do_splitdrive() 107 108 # 109 110 needs_normpath_check = None 111
112 -def initialize_normpath_check():
113 """ 114 Initialize the normpath_check regular expression. 115 116 This function is used by the unit tests to re-initialize the pattern 117 when testing for behavior with different values of os.sep. 118 """ 119 global needs_normpath_check 120 if os.sep == '/': 121 pattern = r'.*/|\.$|\.\.$' 122 else: 123 pattern = r'.*[/%s]|\.$|\.\.$' % re.escape(os.sep) 124 needs_normpath_check = re.compile(pattern)
  125   
  126  initialize_normpath_check() 
  127   
  128  # 
  129  # SCons.Action objects for interacting with the outside world. 
  130  # 
  131  # The Node.FS methods in this module should use these actions to 
  132  # create and/or remove files and directories; they should *not* use 
  133  # os.{link,symlink,unlink,mkdir}(), etc., directly. 
  134  # 
  135  # Using these SCons.Action objects ensures that descriptions of these 
  136  # external activities are properly displayed, that the displays are 
  137  # suppressed when the -s (silent) option is used, and (most importantly) 
  138  # the actions are disabled when the -n option is used, in which case 
  139  # there should be *no* changes to the external file system(s)... 
  140  # 
  141   
  142  if hasattr(os, 'link'): 
  155  else: 
  156      _hardlink_func = None 
  157   
  158  if hasattr(os, 'symlink'): 
  161  else: 
  162      _softlink_func = None 
  163   
164 -def _copy_func(fs, src, dest):
165 shutil.copy2(src, dest) 166 st = fs.stat(src) 167 fs.chmod(dest, stat.S_IMODE(st[stat.ST_MODE]) | stat.S_IWRITE)
168 169 170 Valid_Duplicates = ['hard-soft-copy', 'soft-hard-copy', 171 'hard-copy', 'soft-copy', 'copy'] 172 173 Link_Funcs = [] # contains the callables of the specified duplication style 174
175 -def set_duplicate(duplicate):
176 # Fill in the Link_Funcs list according to the argument 177 # (discarding those not available on the platform). 178 179 # Set up the dictionary that maps the argument names to the 180 # underlying implementations. We do this inside this function, 181 # not in the top-level module code, so that we can remap os.link 182 # and os.symlink for testing purposes. 183 link_dict = { 184 'hard' : _hardlink_func, 185 'soft' : _softlink_func, 186 'copy' : _copy_func 187 } 188 189 if not duplicate in Valid_Duplicates: 190 raise SCons.Errors.InternalError, ("The argument of set_duplicate " 191 "should be in Valid_Duplicates") 192 global Link_Funcs 193 Link_Funcs = [] 194 for func in string.split(duplicate,'-'): 195 if link_dict[func]: 196 Link_Funcs.append(link_dict[func])
197
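
The duplication style is normally chosen at the SConscript level (SetOption('duplicate', ...) or the --duplicate command-line option) rather than by calling set_duplicate() directly, but either way it ends up here. A small sketch of the effect, assuming a platform that has os.symlink:

    set_duplicate('soft-copy')
    # Link_Funcs is now [_softlink_func, _copy_func]: LinkFunc() will try a
    # symlink first and fall back to an actual copy if the symlink fails.
    # On a platform without os.symlink, only _copy_func would remain.
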
198 -def LinkFunc(target, source, env):
  199      # Relative paths cause problems with symbolic links, so 
  200      # we use absolute paths, which may be a problem for people 
  201      # who want to move their soft-linked src-trees around.  Those 
  202      # people should use the 'hard-copy' mode; softlinks cannot be 
  203      # used for that; at least I have no idea how ... 
  204      src = source[0].abspath 
  205      dest = target[0].abspath 
  206      dir, file = os.path.split(dest) 
  207      if dir and not target[0].fs.isdir(dir): 
  208          os.makedirs(dir) 
  209      if not Link_Funcs: 
  210          # Set a default order of link functions. 
  211          set_duplicate('hard-soft-copy') 
  212      fs = source[0].fs 
  213      # Now link the files with the previously specified order. 
  214      for func in Link_Funcs: 
  215          try: 
  216              func(fs, src, dest) 
  217              break 
  218          except (IOError, OSError): 
  219              # An OSError indicates something happened like a permissions 
  220              # problem or an attempt to symlink across file-system 
  221              # boundaries.  An IOError indicates something like the file 
  222              # not existing.  In either case, keep trying additional 
  223              # functions in the list and only raise an error if the last 
  224              # one failed. 
  225              if func == Link_Funcs[-1]: 
  226                  # exceptions from the last link method (copy) are fatal 
  227                  raise 
  228              else: 
  229                  pass 
  230      return 0 
231 232 Link = SCons.Action.Action(LinkFunc, None)
233 -def LocalString(target, source, env):
234 return 'Local copy of %s from %s' % (target[0], source[0])
235 236 LocalCopy = SCons.Action.Action(LinkFunc, LocalString) 237
238 -def UnlinkFunc(target, source, env):
239 t = target[0] 240 t.fs.unlink(t.abspath) 241 return 0
242 243 Unlink = SCons.Action.Action(UnlinkFunc, None) 244
245 -def MkdirFunc(target, source, env):
246 t = target[0] 247 if not t.exists(): 248 t.fs.mkdir(t.abspath) 249 return 0
250 251 Mkdir = SCons.Action.Action(MkdirFunc, None, presub=None) 252 253 MkdirBuilder = None 254
255 -def get_MkdirBuilder():
256 global MkdirBuilder 257 if MkdirBuilder is None: 258 import SCons.Builder 259 import SCons.Defaults 260 # "env" will get filled in by Executor.get_build_env() 261 # calling SCons.Defaults.DefaultEnvironment() when necessary. 262 MkdirBuilder = SCons.Builder.Builder(action = Mkdir, 263 env = None, 264 explain = None, 265 is_explicit = None, 266 target_scanner = SCons.Defaults.DirEntryScanner, 267 name = "MkdirBuilder") 268 return MkdirBuilder
269
270 -class _Null:
271 pass
272 273 _null = _Null() 274 275 DefaultSCCSBuilder = None 276 DefaultRCSBuilder = None 277
278 -def get_DefaultSCCSBuilder():
279 global DefaultSCCSBuilder 280 if DefaultSCCSBuilder is None: 281 import SCons.Builder 282 # "env" will get filled in by Executor.get_build_env() 283 # calling SCons.Defaults.DefaultEnvironment() when necessary. 284 act = SCons.Action.Action('$SCCSCOM', '$SCCSCOMSTR') 285 DefaultSCCSBuilder = SCons.Builder.Builder(action = act, 286 env = None, 287 name = "DefaultSCCSBuilder") 288 return DefaultSCCSBuilder
289
290 -def get_DefaultRCSBuilder():
291 global DefaultRCSBuilder 292 if DefaultRCSBuilder is None: 293 import SCons.Builder 294 # "env" will get filled in by Executor.get_build_env() 295 # calling SCons.Defaults.DefaultEnvironment() when necessary. 296 act = SCons.Action.Action('$RCS_COCOM', '$RCS_COCOMSTR') 297 DefaultRCSBuilder = SCons.Builder.Builder(action = act, 298 env = None, 299 name = "DefaultRCSBuilder") 300 return DefaultRCSBuilder
301 302 # Cygwin's os.path.normcase pretends it's on a case-sensitive filesystem. 303 _is_cygwin = sys.platform == "cygwin" 304 if os.path.normcase("TeSt") == os.path.normpath("TeSt") and not _is_cygwin:
305 - def _my_normcase(x):
306 return x
307 else:
308 - def _my_normcase(x):
309 return string.upper(x)
310 311 312
313 -class DiskChecker:
314 - def __init__(self, type, do, ignore):
315 self.type = type 316 self.do = do 317 self.ignore = ignore 318 self.set_do()
319 - def set_do(self):
320 self.__call__ = self.do
321 - def set_ignore(self):
322 self.__call__ = self.ignore
323 - def set(self, list):
324 if self.type in list: 325 self.set_do() 326 else: 327 self.set_ignore()
328
329 -def do_diskcheck_match(node, predicate, errorfmt):
330 result = predicate() 331 try: 332 # If calling the predicate() cached a None value from stat(), 333 # remove it so it doesn't interfere with later attempts to 334 # build this Node as we walk the DAG. (This isn't a great way 335 # to do this, we're reaching into an interface that doesn't 336 # really belong to us, but it's all about performance, so 337 # for now we'll just document the dependency...) 338 if node._memo['stat'] is None: 339 del node._memo['stat'] 340 except (AttributeError, KeyError): 341 pass 342 if result: 343 raise TypeError, errorfmt % node.abspath
344
345 -def ignore_diskcheck_match(node, predicate, errorfmt):
346 pass
347
348 -def do_diskcheck_rcs(node, name):
349 try: 350 rcs_dir = node.rcs_dir 351 except AttributeError: 352 if node.entry_exists_on_disk('RCS'): 353 rcs_dir = node.Dir('RCS') 354 else: 355 rcs_dir = None 356 node.rcs_dir = rcs_dir 357 if rcs_dir: 358 return rcs_dir.entry_exists_on_disk(name+',v') 359 return None
360
361 -def ignore_diskcheck_rcs(node, name):
362 return None
363
364 -def do_diskcheck_sccs(node, name):
365 try: 366 sccs_dir = node.sccs_dir 367 except AttributeError: 368 if node.entry_exists_on_disk('SCCS'): 369 sccs_dir = node.Dir('SCCS') 370 else: 371 sccs_dir = None 372 node.sccs_dir = sccs_dir 373 if sccs_dir: 374 return sccs_dir.entry_exists_on_disk('s.'+name) 375 return None
376
377 -def ignore_diskcheck_sccs(node, name):
378 return None
379 380 diskcheck_match = DiskChecker('match', do_diskcheck_match, ignore_diskcheck_match) 381 diskcheck_rcs = DiskChecker('rcs', do_diskcheck_rcs, ignore_diskcheck_rcs) 382 diskcheck_sccs = DiskChecker('sccs', do_diskcheck_sccs, ignore_diskcheck_sccs) 383 384 diskcheckers = [ 385 diskcheck_match, 386 diskcheck_rcs, 387 diskcheck_sccs, 388 ] 389
390 -def set_diskcheck(list):
391 for dc in diskcheckers: 392 dc.set(list)
393
394 -def diskcheck_types():
395 return map(lambda dc: dc.type, diskcheckers)
396 397 398
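
These checkers are toggled as a group from the command-line layer; --diskcheck=none (or SetOption('diskcheck', ...)) ultimately funnels into set_diskcheck(). A minimal sketch:

    set_diskcheck(['match'])     # keep only the File-vs-Dir mismatch check;
                                 # the rcs and sccs checkers swap their __call__
                                 # to the ignore_* variants and return None
    print diskcheck_types()      # -> ['match', 'rcs', 'sccs']
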
399 -class EntryProxy(SCons.Util.Proxy):
400 - def __get_abspath(self):
401 entry = self.get() 402 return SCons.Subst.SpecialAttrWrapper(entry.get_abspath(), 403 entry.name + "_abspath")
404
405 - def __get_filebase(self):
406 name = self.get().name 407 return SCons.Subst.SpecialAttrWrapper(SCons.Util.splitext(name)[0], 408 name + "_filebase")
409
410 - def __get_suffix(self):
411 name = self.get().name 412 return SCons.Subst.SpecialAttrWrapper(SCons.Util.splitext(name)[1], 413 name + "_suffix")
414
415 - def __get_file(self):
416 name = self.get().name 417 return SCons.Subst.SpecialAttrWrapper(name, name + "_file")
418
419 - def __get_base_path(self):
420 """Return the file's directory and file name, with the 421 suffix stripped.""" 422 entry = self.get() 423 return SCons.Subst.SpecialAttrWrapper(SCons.Util.splitext(entry.get_path())[0], 424 entry.name + "_base")
425
426 - def __get_posix_path(self):
427 """Return the path with / as the path separator, 428 regardless of platform.""" 429 if os.sep == '/': 430 return self 431 else: 432 entry = self.get() 433 r = string.replace(entry.get_path(), os.sep, '/') 434 return SCons.Subst.SpecialAttrWrapper(r, entry.name + "_posix")
435
436 - def __get_windows_path(self):
437 """Return the path with \ as the path separator, 438 regardless of platform.""" 439 if os.sep == '\\': 440 return self 441 else: 442 entry = self.get() 443 r = string.replace(entry.get_path(), os.sep, '\\') 444 return SCons.Subst.SpecialAttrWrapper(r, entry.name + "_windows")
445
446 - def __get_srcnode(self):
447 return EntryProxy(self.get().srcnode())
448
449 - def __get_srcdir(self):
450 """Returns the directory containing the source node linked to this 451 node via VariantDir(), or the directory of this node if not linked.""" 452 return EntryProxy(self.get().srcnode().dir)
453
454 - def __get_rsrcnode(self):
455 return EntryProxy(self.get().srcnode().rfile())
456
457 - def __get_rsrcdir(self):
458 """Returns the directory containing the source node linked to this 459 node via VariantDir(), or the directory of this node if not linked.""" 460 return EntryProxy(self.get().srcnode().rfile().dir)
461
462 - def __get_dir(self):
463 return EntryProxy(self.get().dir)
464 465 dictSpecialAttrs = { "base" : __get_base_path, 466 "posix" : __get_posix_path, 467 "windows" : __get_windows_path, 468 "win32" : __get_windows_path, 469 "srcpath" : __get_srcnode, 470 "srcdir" : __get_srcdir, 471 "dir" : __get_dir, 472 "abspath" : __get_abspath, 473 "filebase" : __get_filebase, 474 "suffix" : __get_suffix, 475 "file" : __get_file, 476 "rsrcpath" : __get_rsrcnode, 477 "rsrcdir" : __get_rsrcdir, 478 } 479
480 - def __getattr__(self, name):
481 # This is how we implement the "special" attributes 482 # such as base, posix, srcdir, etc. 483 try: 484 attr_function = self.dictSpecialAttrs[name] 485 except KeyError: 486 try: 487 attr = SCons.Util.Proxy.__getattr__(self, name) 488 except AttributeError: 489 entry = self.get() 490 classname = string.split(str(entry.__class__), '.')[-1] 491 if classname[-2:] == "'>": 492 # new-style classes report their name as: 493 # "<class 'something'>" 494 # instead of the classic classes: 495 # "something" 496 classname = classname[:-2] 497 raise AttributeError, "%s instance '%s' has no attribute '%s'" % (classname, entry.name, name) 498 return attr 499 else: 500 return attr_function(self)
501
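
EntryProxy is what backs the "special" node attributes available during command-line substitution. A hedged SConscript-level sketch of how they surface (the file names are hypothetical):

    env = Environment()
    env.Command('out.txt', 'src/input.c',
                'echo ${SOURCE.filebase} ${SOURCE.posix} > $TARGET')
    # ${SOURCE.posix}    -> 'src/input.c' with '/' separators on any platform
    # ${SOURCE.filebase} -> 'input' (the file name with its suffix stripped)
    # ${SOURCE.srcdir}, ${TARGET.abspath}, etc. resolve through the same table.
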
502 -class Base(SCons.Node.Node):
503 """A generic class for file system entries. This class is for 504 when we don't know yet whether the entry being looked up is a file 505 or a directory. Instances of this class can morph into either 506 Dir or File objects by a later, more precise lookup. 507 508 Note: this class does not define __cmp__ and __hash__ for 509 efficiency reasons. SCons does a lot of comparing of 510 Node.FS.{Base,Entry,File,Dir} objects, so those operations must be 511 as fast as possible, which means we want to use Python's built-in 512 object identity comparisons. 513 """ 514 515 memoizer_counters = [] 516
517 - def __init__(self, name, directory, fs):
518 """Initialize a generic Node.FS.Base object. 519 520 Call the superclass initialization, take care of setting up 521 our relative and absolute paths, identify our parent 522 directory, and indicate that this node should use 523 signatures.""" 524 if __debug__: logInstanceCreation(self, 'Node.FS.Base') 525 SCons.Node.Node.__init__(self) 526 527 self.name = name 528 self.suffix = SCons.Util.splitext(name)[1] 529 self.fs = fs 530 531 assert directory, "A directory must be provided" 532 533 self.abspath = directory.entry_abspath(name) 534 self.labspath = directory.entry_labspath(name) 535 if directory.path == '.': 536 self.path = name 537 else: 538 self.path = directory.entry_path(name) 539 if directory.tpath == '.': 540 self.tpath = name 541 else: 542 self.tpath = directory.entry_tpath(name) 543 self.path_elements = directory.path_elements + [self] 544 545 self.dir = directory 546 self.cwd = None # will hold the SConscript directory for target nodes 547 self.duplicate = directory.duplicate
548
549 - def str_for_display(self):
550 return '"' + self.__str__() + '"'
551
552 - def must_be_same(self, klass):
553 """ 554 This node, which already existed, is being looked up as the 555 specified klass. Raise an exception if it isn't. 556 """ 557 if self.__class__ is klass or klass is Entry: 558 return 559 raise TypeError, "Tried to lookup %s '%s' as a %s." %\ 560 (self.__class__.__name__, self.path, klass.__name__)
561
562 - def get_dir(self):
563 return self.dir
564
565 - def get_suffix(self):
566 return self.suffix
567
568 - def rfile(self):
569 return self
570
571 - def __str__(self):
572 """A Node.FS.Base object's string representation is its path 573 name.""" 574 global Save_Strings 575 if Save_Strings: 576 return self._save_str() 577 return self._get_str()
578 579 memoizer_counters.append(SCons.Memoize.CountValue('_save_str')) 580
581 - def _save_str(self):
582 try: 583 return self._memo['_save_str'] 584 except KeyError: 585 pass 586 result = self._get_str() 587 self._memo['_save_str'] = result 588 return result
589
590 - def _get_str(self):
  591      global Save_Strings 
  592      if self.duplicate or self.is_derived(): 
  593          return self.get_path() 
  594      srcnode = self.srcnode() 
  595      if srcnode.stat() is None and not self.stat() is None: 
  596          result = self.get_path() 
  597      else: 
  598          result = srcnode.get_path() 
  599      if not Save_Strings: 
  600          # We're not at the point where we're saving the string 
  601          # representations of FS Nodes (because we haven't finished 
  602          # reading the SConscript files and need to have str() return 
  603          # things relative to them).  That also means we can't yet 
  604          # cache values returned (or not returned) by stat(), since 
  605          # Python code in the SConscript files might still create 
  606          # or otherwise affect the on-disk file.  So get rid of the 
  607          # values that the underlying stat() method saved. 
  608          try: del self._memo['stat'] 
  609          except KeyError: pass 
  610          if not self is srcnode: 
  611              try: del srcnode._memo['stat'] 
  612              except KeyError: pass 
  613      return result 
614 615 rstr = __str__ 616 617 memoizer_counters.append(SCons.Memoize.CountValue('stat')) 618
619 - def stat(self):
620 try: return self._memo['stat'] 621 except KeyError: pass 622 try: result = self.fs.stat(self.abspath) 623 except os.error: result = None 624 self._memo['stat'] = result 625 return result
626
627 - def exists(self):
628 return not self.stat() is None
629
630 - def rexists(self):
631 return self.rfile().exists()
632
633 - def getmtime(self):
634 st = self.stat() 635 if st: return st[stat.ST_MTIME] 636 else: return None
637
638 - def getsize(self):
639 st = self.stat() 640 if st: return st[stat.ST_SIZE] 641 else: return None
642
643 - def isdir(self):
644 st = self.stat() 645 return not st is None and stat.S_ISDIR(st[stat.ST_MODE])
646
647 - def isfile(self):
648 st = self.stat() 649 return not st is None and stat.S_ISREG(st[stat.ST_MODE])
650 651 if hasattr(os, 'symlink'): 656 else: 659
660 - def is_under(self, dir):
661 if self is dir: 662 return 1 663 else: 664 return self.dir.is_under(dir)
665
666 - def set_local(self):
667 self._local = 1
668
669 - def srcnode(self):
670 """If this node is in a build path, return the node 671 corresponding to its source file. Otherwise, return 672 ourself. 673 """ 674 srcdir_list = self.dir.srcdir_list() 675 if srcdir_list: 676 srcnode = srcdir_list[0].Entry(self.name) 677 srcnode.must_be_same(self.__class__) 678 return srcnode 679 return self
680
681 - def get_path(self, dir=None):
682 """Return path relative to the current working directory of the 683 Node.FS.Base object that owns us.""" 684 if not dir: 685 dir = self.fs.getcwd() 686 if self == dir: 687 return '.' 688 path_elems = self.path_elements 689 try: i = path_elems.index(dir) 690 except ValueError: pass 691 else: path_elems = path_elems[i+1:] 692 path_elems = map(lambda n: n.name, path_elems) 693 return string.join(path_elems, os.sep)
694
695 - def set_src_builder(self, builder):
696 """Set the source code builder for this node.""" 697 self.sbuilder = builder 698 if not self.has_builder(): 699 self.builder_set(builder)
700
701 - def src_builder(self):
702 """Fetch the source code builder for this node. 703 704 If there isn't one, we cache the source code builder specified 705 for the directory (which in turn will cache the value from its 706 parent directory, and so on up to the file system root). 707 """ 708 try: 709 scb = self.sbuilder 710 except AttributeError: 711 scb = self.dir.src_builder() 712 self.sbuilder = scb 713 return scb
714
715 - def get_abspath(self):
716 """Get the absolute path of the file.""" 717 return self.abspath
718
719 - def for_signature(self):
720 # Return just our name. Even an absolute path would not work, 721 # because that can change thanks to symlinks or remapped network 722 # paths. 723 return self.name
724
725 - def get_subst_proxy(self):
726 try: 727 return self._proxy 728 except AttributeError: 729 ret = EntryProxy(self) 730 self._proxy = ret 731 return ret
732
733 - def target_from_source(self, prefix, suffix, splitext=SCons.Util.splitext):
734 """ 735 736 Generates a target entry that corresponds to this entry (usually 737 a source file) with the specified prefix and suffix. 738 739 Note that this method can be overridden dynamically for generated 740 files that need different behavior. See Tool/swig.py for 741 an example. 742 """ 743 return self.dir.Entry(prefix + splitext(self.name)[0] + suffix)
744
745 - def _Rfindalldirs_key(self, pathlist):
746 return pathlist
747 748 memoizer_counters.append(SCons.Memoize.CountDict('Rfindalldirs', _Rfindalldirs_key)) 749
750 - def Rfindalldirs(self, pathlist):
751 """ 752 Return all of the directories for a given path list, including 753 corresponding "backing" directories in any repositories. 754 755 The Node lookups are relative to this Node (typically a 756 directory), so memoizing result saves cycles from looking 757 up the same path for each target in a given directory. 758 """ 759 try: 760 memo_dict = self._memo['Rfindalldirs'] 761 except KeyError: 762 memo_dict = {} 763 self._memo['Rfindalldirs'] = memo_dict 764 else: 765 try: 766 return memo_dict[pathlist] 767 except KeyError: 768 pass 769 770 create_dir_relative_to_self = self.Dir 771 result = [] 772 for path in pathlist: 773 if isinstance(path, SCons.Node.Node): 774 result.append(path) 775 else: 776 dir = create_dir_relative_to_self(path) 777 result.extend(dir.get_all_rdirs()) 778 779 memo_dict[pathlist] = result 780 781 return result
782
783 - def RDirs(self, pathlist):
784 """Search for a list of directories in the Repository list.""" 785 cwd = self.cwd or self.fs._cwd 786 return cwd.Rfindalldirs(pathlist)
787 788 memoizer_counters.append(SCons.Memoize.CountValue('rentry')) 789
790 - def rentry(self):
791 try: 792 return self._memo['rentry'] 793 except KeyError: 794 pass 795 result = self 796 if not self.exists(): 797 norm_name = _my_normcase(self.name) 798 for dir in self.dir.get_all_rdirs(): 799 try: 800 node = dir.entries[norm_name] 801 except KeyError: 802 if dir.entry_exists_on_disk(self.name): 803 result = dir.Entry(self.name) 804 break 805 self._memo['rentry'] = result 806 return result
807
808 - def _glob1(self, pattern, ondisk=True, source=False, strings=False):
809 return []
810
811 -class Entry(Base):
812 """This is the class for generic Node.FS entries--that is, things 813 that could be a File or a Dir, but we're just not sure yet. 814 Consequently, the methods in this class really exist just to 815 transform their associated object into the right class when the 816 time comes, and then call the same-named method in the transformed 817 class.""" 818
819 - def diskcheck_match(self):
820 pass
821
822 - def disambiguate(self, must_exist=None):
823 """ 824 """ 825 if self.isdir(): 826 self.__class__ = Dir 827 self._morph() 828 elif self.isfile(): 829 self.__class__ = File 830 self._morph() 831 self.clear() 832 else: 833 # There was nothing on-disk at this location, so look in 834 # the src directory. 835 # 836 # We can't just use self.srcnode() straight away because 837 # that would create an actual Node for this file in the src 838 # directory, and there might not be one. Instead, use the 839 # dir_on_disk() method to see if there's something on-disk 840 # with that name, in which case we can go ahead and call 841 # self.srcnode() to create the right type of entry. 842 srcdir = self.dir.srcnode() 843 if srcdir != self.dir and \ 844 srcdir.entry_exists_on_disk(self.name) and \ 845 self.srcnode().isdir(): 846 self.__class__ = Dir 847 self._morph() 848 elif must_exist: 849 msg = "No such file or directory: '%s'" % self.abspath 850 raise SCons.Errors.UserError, msg 851 else: 852 self.__class__ = File 853 self._morph() 854 self.clear() 855 return self
856
857 - def rfile(self):
858 """We're a generic Entry, but the caller is actually looking for 859 a File at this point, so morph into one.""" 860 self.__class__ = File 861 self._morph() 862 self.clear() 863 return File.rfile(self)
864
865 - def scanner_key(self):
866 return self.get_suffix()
867
868 - def get_contents(self):
869 """Fetch the contents of the entry. 870 871 Since this should return the real contents from the file 872 system, we check to see into what sort of subclass we should 873 morph this Entry.""" 874 try: 875 self = self.disambiguate(must_exist=1) 876 except SCons.Errors.UserError: 877 # There was nothing on disk with which to disambiguate 878 # this entry. Leave it as an Entry, but return a null 879 # string so calls to get_contents() in emitters and the 880 # like (e.g. in qt.py) don't have to disambiguate by hand 881 # or catch the exception. 882 return '' 883 else: 884 return self.get_contents()
885
886 - def must_be_same(self, klass):
887 """Called to make sure a Node is a Dir. Since we're an 888 Entry, we can morph into one.""" 889 if not self.__class__ is klass: 890 self.__class__ = klass 891 self._morph() 892 self.clear()
893 894 # The following methods can get called before the Taskmaster has 895 # had a chance to call disambiguate() directly to see if this Entry 896 # should really be a Dir or a File. We therefore use these to call 897 # disambiguate() transparently (from our caller's point of view). 898 # 899 # Right now, this minimal set of methods has been derived by just 900 # looking at some of the methods that will obviously be called early 901 # in any of the various Taskmasters' calling sequences, and then 902 # empirically figuring out which additional methods are necessary 903 # to make various tests pass. 904
905 - def exists(self):
906 """Return if the Entry exists. Check the file system to see 907 what we should turn into first. Assume a file if there's no 908 directory.""" 909 return self.disambiguate().exists()
910
911 - def rel_path(self, other):
912 d = self.disambiguate() 913 if d.__class__ == Entry: 914 raise "rel_path() could not disambiguate File/Dir" 915 return d.rel_path(other)
916
917 - def new_ninfo(self):
918 return self.disambiguate().new_ninfo()
919
920 - def changed_since_last_build(self, target, prev_ni):
921 return self.disambiguate().changed_since_last_build(target, prev_ni)
922
923 - def _glob1(self, pattern, ondisk=True, source=False, strings=False):
924 return self.disambiguate()._glob1(pattern, ondisk, source, strings)
925 926 # This is for later so we can differentiate between Entry the class and Entry 927 # the method of the FS class. 928 _classEntry = Entry 929 930
931 -class LocalFS:
932 933 if SCons.Memoize.use_memoizer: 934 __metaclass__ = SCons.Memoize.Memoized_Metaclass 935 936 # This class implements an abstraction layer for operations involving 937 # a local file system. Essentially, this wraps any function in 938 # the os, os.path or shutil modules that we use to actually go do 939 # anything with or to the local file system. 940 # 941 # Note that there's a very good chance we'll refactor this part of 942 # the architecture in some way as we really implement the interface(s) 943 # for remote file system Nodes. For example, the right architecture 944 # might be to have this be a subclass instead of a base class. 945 # Nevertheless, we're using this as a first step in that direction. 946 # 947 # We're not using chdir() yet because the calling subclass method 948 # needs to use os.chdir() directly to avoid recursion. Will we 949 # really need this one? 950 #def chdir(self, path): 951 # return os.chdir(path)
952 - def chmod(self, path, mode):
953 return os.chmod(path, mode)
954 - def copy(self, src, dst):
955 return shutil.copy(src, dst)
956 - def copy2(self, src, dst):
957 return shutil.copy2(src, dst)
958 - def exists(self, path):
959 return os.path.exists(path)
960 - def getmtime(self, path):
961 return os.path.getmtime(path)
962 - def getsize(self, path):
963 return os.path.getsize(path)
964 - def isdir(self, path):
965 return os.path.isdir(path)
966 - def isfile(self, path):
967 return os.path.isfile(path)
970 - def lstat(self, path):
971 return os.lstat(path)
972 - def listdir(self, path):
973 return os.listdir(path)
974 - def makedirs(self, path):
975 return os.makedirs(path)
976 - def mkdir(self, path):
977 return os.mkdir(path)
978 - def rename(self, old, new):
979 return os.rename(old, new)
980 - def stat(self, path):
981 return os.stat(path)
984 - def open(self, path):
985 return open(path)
988 989 if hasattr(os, 'symlink'): 992 else: 995 996 if hasattr(os, 'readlink'): 999 else:
1002 1003 1004 #class RemoteFS: 1005 # # Skeleton for the obvious methods we might need from the 1006 # # abstraction layer for a remote filesystem. 1007 # def upload(self, local_src, remote_dst): 1008 # pass 1009 # def download(self, remote_src, local_dst): 1010 # pass 1011 1012
1013 -class FS(LocalFS):
1014 1015 memoizer_counters = [] 1016
1017 - def __init__(self, path = None):
1018 """Initialize the Node.FS subsystem. 1019 1020 The supplied path is the top of the source tree, where we 1021 expect to find the top-level build file. If no path is 1022 supplied, the current directory is the default. 1023 1024 The path argument must be a valid absolute path. 1025 """ 1026 if __debug__: logInstanceCreation(self, 'Node.FS') 1027 1028 self._memo = {} 1029 1030 self.Root = {} 1031 self.SConstruct_dir = None 1032 self.max_drift = default_max_drift 1033 1034 self.Top = None 1035 if path is None: 1036 self.pathTop = os.getcwd() 1037 else: 1038 self.pathTop = path 1039 self.defaultDrive = _my_normcase(os.path.splitdrive(self.pathTop)[0]) 1040 1041 self.Top = self.Dir(self.pathTop) 1042 self.Top.path = '.' 1043 self.Top.tpath = '.' 1044 self._cwd = self.Top 1045 1046 DirNodeInfo.fs = self 1047 FileNodeInfo.fs = self
1048
1049 - def set_SConstruct_dir(self, dir):
1050 self.SConstruct_dir = dir
1051
1052 - def get_max_drift(self):
1053 return self.max_drift
1054
1055 - def set_max_drift(self, max_drift):
1056 self.max_drift = max_drift
1057
1058 - def getcwd(self):
1059 return self._cwd
1060
1061 - def chdir(self, dir, change_os_dir=0):
1062 """Change the current working directory for lookups. 1063 If change_os_dir is true, we will also change the "real" cwd 1064 to match. 1065 """ 1066 curr=self._cwd 1067 try: 1068 if not dir is None: 1069 self._cwd = dir 1070 if change_os_dir: 1071 os.chdir(dir.abspath) 1072 except OSError: 1073 self._cwd = curr 1074 raise
1075
1076 - def get_root(self, drive):
1077 """ 1078 Returns the root directory for the specified drive, creating 1079 it if necessary. 1080 """ 1081 drive = _my_normcase(drive) 1082 try: 1083 return self.Root[drive] 1084 except KeyError: 1085 root = RootDir(drive, self) 1086 self.Root[drive] = root 1087 if not drive: 1088 self.Root[self.defaultDrive] = root 1089 elif drive == self.defaultDrive: 1090 self.Root[''] = root 1091 return root
1092
1093 - def _lookup(self, p, directory, fsclass, create=1):
1094 """ 1095 The generic entry point for Node lookup with user-supplied data. 1096 1097 This translates arbitrary input into a canonical Node.FS object 1098 of the specified fsclass. The general approach for strings is 1099 to turn it into a fully normalized absolute path and then call 1100 the root directory's lookup_abs() method for the heavy lifting. 1101 1102 If the path name begins with '#', it is unconditionally 1103 interpreted relative to the top-level directory of this FS. '#' 1104 is treated as a synonym for the top-level SConstruct directory, 1105 much like '~' is treated as a synonym for the user's home 1106 directory in a UNIX shell. So both '#foo' and '#/foo' refer 1107 to the 'foo' subdirectory underneath the top-level SConstruct 1108 directory. 1109 1110 If the path name is relative, then the path is looked up relative 1111 to the specified directory, or the current directory (self._cwd, 1112 typically the SConscript directory) if the specified directory 1113 is None. 1114 """ 1115 if isinstance(p, Base): 1116 # It's already a Node.FS object. Make sure it's the right 1117 # class and return. 1118 p.must_be_same(fsclass) 1119 return p 1120 # str(p) in case it's something like a proxy object 1121 p = str(p) 1122 1123 initial_hash = (p[0:1] == '#') 1124 if initial_hash: 1125 # There was an initial '#', so we strip it and override 1126 # whatever directory they may have specified with the 1127 # top-level SConstruct directory. 1128 p = p[1:] 1129 directory = self.Top 1130 1131 if directory and not isinstance(directory, Dir): 1132 directory = self.Dir(directory) 1133 1134 if do_splitdrive: 1135 drive, p = os.path.splitdrive(p) 1136 else: 1137 drive = '' 1138 if drive and not p: 1139 # This causes a naked drive letter to be treated as a synonym 1140 # for the root directory on that drive. 1141 p = os.sep 1142 absolute = os.path.isabs(p) 1143 1144 needs_normpath = needs_normpath_check.match(p) 1145 1146 if initial_hash or not absolute: 1147 # This is a relative lookup, either to the top-level 1148 # SConstruct directory (because of the initial '#') or to 1149 # the current directory (the path name is not absolute). 1150 # Add the string to the appropriate directory lookup path, 1151 # after which the whole thing gets normalized. 1152 if not directory: 1153 directory = self._cwd 1154 if p: 1155 p = directory.labspath + '/' + p 1156 else: 1157 p = directory.labspath 1158 1159 if needs_normpath: 1160 p = os.path.normpath(p) 1161 1162 if drive or absolute: 1163 root = self.get_root(drive) 1164 else: 1165 if not directory: 1166 directory = self._cwd 1167 root = directory.root 1168 1169 if os.sep != '/': 1170 p = string.replace(p, os.sep, '/') 1171 return root._lookup_abs(p, fsclass, create)
1172
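
A short sketch of the lookup conventions described above, assuming an FS rooted at a hypothetical /work/proj:

    fs = FS('/work/proj')
    n1 = fs.Entry('#src/main.c')     # '#' => relative to the top: /work/proj/src/main.c
    n2 = fs.Entry('src/main.c', fs.Dir('build'))
                                     # relative lookup:  /work/proj/build/src/main.c
    n3 = fs.Entry('/etc/hosts')      # absolute paths go through get_root()
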
1173 - def Entry(self, name, directory = None, create = 1):
1174 """Lookup or create a generic Entry node with the specified name. 1175 If the name is a relative path (begins with ./, ../, or a file 1176 name), then it is looked up relative to the supplied directory 1177 node, or to the top level directory of the FS (supplied at 1178 construction time) if no directory is supplied. 1179 """ 1180 return self._lookup(name, directory, Entry, create)
1181
1182 - def File(self, name, directory = None, create = 1):
1183 """Lookup or create a File node with the specified name. If 1184 the name is a relative path (begins with ./, ../, or a file name), 1185 then it is looked up relative to the supplied directory node, 1186 or to the top level directory of the FS (supplied at construction 1187 time) if no directory is supplied. 1188 1189 This method will raise TypeError if a directory is found at the 1190 specified path. 1191 """ 1192 return self._lookup(name, directory, File, create)
1193
1194 - def Dir(self, name, directory = None, create = True):
1195 """Lookup or create a Dir node with the specified name. If 1196 the name is a relative path (begins with ./, ../, or a file name), 1197 then it is looked up relative to the supplied directory node, 1198 or to the top level directory of the FS (supplied at construction 1199 time) if no directory is supplied. 1200 1201 This method will raise TypeError if a normal file is found at the 1202 specified path. 1203 """ 1204 return self._lookup(name, directory, Dir, create)
1205
1206 - def VariantDir(self, variant_dir, src_dir, duplicate=1):
1207 """Link the supplied variant directory to the source directory 1208 for purposes of building files.""" 1209 1210 if not isinstance(src_dir, SCons.Node.Node): 1211 src_dir = self.Dir(src_dir) 1212 if not isinstance(variant_dir, SCons.Node.Node): 1213 variant_dir = self.Dir(variant_dir) 1214 if src_dir.is_under(variant_dir): 1215 raise SCons.Errors.UserError, "Source directory cannot be under variant directory." 1216 if variant_dir.srcdir: 1217 if variant_dir.srcdir == src_dir: 1218 return # We already did this. 1219 raise SCons.Errors.UserError, "'%s' already has a source directory: '%s'."%(variant_dir, variant_dir.srcdir) 1220 variant_dir.link(src_dir, duplicate)
1221
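
In an SConscript this is reached through the global VariantDir() call; for example:

    VariantDir('build', 'src', duplicate=0)
    SConscript('build/SConscript')
    # Nodes looked up under 'build' now report their 'src' counterparts from
    # srcnode(), and with duplicate=0 the sources are used in place rather
    # than being copied into the variant directory.
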
1222 - def Repository(self, *dirs):
1223 """Specify Repository directories to search.""" 1224 for d in dirs: 1225 if not isinstance(d, SCons.Node.Node): 1226 d = self.Dir(d) 1227 self.Top.addRepository(d)
1228
1229 - def variant_dir_target_climb(self, orig, dir, tail):
1230 """Create targets in corresponding variant directories 1231 1232 Climb the directory tree, and look up path names 1233 relative to any linked variant directories we find. 1234 1235 Even though this loops and walks up the tree, we don't memoize 1236 the return value because this is really only used to process 1237 the command-line targets. 1238 """ 1239 targets = [] 1240 message = None 1241 fmt = "building associated VariantDir targets: %s" 1242 start_dir = dir 1243 while dir: 1244 for bd in dir.variant_dirs: 1245 if start_dir.is_under(bd): 1246 # If already in the build-dir location, don't reflect 1247 return [orig], fmt % str(orig) 1248 p = apply(os.path.join, [bd.path] + tail) 1249 targets.append(self.Entry(p)) 1250 tail = [dir.name] + tail 1251 dir = dir.up() 1252 if targets: 1253 message = fmt % string.join(map(str, targets)) 1254 return targets, message
1255
1256 - def Glob(self, pathname, ondisk=True, source=True, strings=False, cwd=None):
1257 """ 1258 Globs 1259 1260 This is mainly a shim layer 1261 """ 1262 if cwd is None: 1263 cwd = self.getcwd() 1264 return cwd.glob(pathname, ondisk, source, strings)
1265
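
The public SConscript Glob() function is a thin wrapper over this method, resolving the pattern relative to the calling SConscript's directory:

    sources = Glob('src/*.c')                # File/Dir nodes, Repository-aware
    names   = Glob('src/*.c', strings=True)  # the same matches as relative path strings
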
1266 -class DirNodeInfo(SCons.Node.NodeInfoBase):
1267 # This should get reset by the FS initialization. 1268 current_version_id = 1 1269 1270 fs = None 1271
1272 - def str_to_node(self, s):
1273 top = self.fs.Top 1274 root = top.root 1275 if do_splitdrive: 1276 drive, s = os.path.splitdrive(s) 1277 if drive: 1278 root = self.fs.get_root(drive) 1279 if not os.path.isabs(s): 1280 s = top.labspath + '/' + s 1281 return root._lookup_abs(s, Entry)
1282
1283 -class DirBuildInfo(SCons.Node.BuildInfoBase):
1284 current_version_id = 1
1285 1286 glob_magic_check = re.compile('[*?[]') 1287
1288 -def has_glob_magic(s):
1289 return glob_magic_check.search(s) is not None
1290
1291 -class Dir(Base):
1292 """A class for directories in a file system. 1293 """ 1294 1295 memoizer_counters = [] 1296 1297 NodeInfo = DirNodeInfo 1298 BuildInfo = DirBuildInfo 1299
1300 - def __init__(self, name, directory, fs):
1301 if __debug__: logInstanceCreation(self, 'Node.FS.Dir') 1302 Base.__init__(self, name, directory, fs) 1303 self._morph()
1304
1305 - def _morph(self):
1306 """Turn a file system Node (either a freshly initialized directory 1307 object or a separate Entry object) into a proper directory object. 1308 1309 Set up this directory's entries and hook it into the file 1310 system tree. Specify that directories (this Node) don't use 1311 signatures for calculating whether they're current. 1312 """ 1313 1314 self.repositories = [] 1315 self.srcdir = None 1316 1317 self.entries = {} 1318 self.entries['.'] = self 1319 self.entries['..'] = self.dir 1320 self.cwd = self 1321 self.searched = 0 1322 self._sconsign = None 1323 self.variant_dirs = [] 1324 self.root = self.dir.root 1325 1326 # Don't just reset the executor, replace its action list, 1327 # because it might have some pre-or post-actions that need to 1328 # be preserved. 1329 self.builder = get_MkdirBuilder() 1330 self.get_executor().set_action_list(self.builder.action)
1331
1332 - def diskcheck_match(self):
1333 diskcheck_match(self, self.isfile, 1334 "File %s found where directory expected.")
1335
1336 - def __clearRepositoryCache(self, duplicate=None):
1337 """Called when we change the repository(ies) for a directory. 1338 This clears any cached information that is invalidated by changing 1339 the repository.""" 1340 1341 for node in self.entries.values(): 1342 if node != self.dir: 1343 if node != self and isinstance(node, Dir): 1344 node.__clearRepositoryCache(duplicate) 1345 else: 1346 node.clear() 1347 try: 1348 del node._srcreps 1349 except AttributeError: 1350 pass 1351 if duplicate != None: 1352 node.duplicate=duplicate
1353
1354 - def __resetDuplicate(self, node):
1355 if node != self: 1356 node.duplicate = node.get_dir().duplicate
1357
1358 - def Entry(self, name):
1359 """ 1360 Looks up or creates an entry node named 'name' relative to 1361 this directory. 1362 """ 1363 return self.fs.Entry(name, self)
1364
1365 - def Dir(self, name, create=True):
1366 """ 1367 Looks up or creates a directory node named 'name' relative to 1368 this directory. 1369 """ 1370 dir = self.fs.Dir(name, self, create) 1371 return dir
1372
1373 - def File(self, name):
1374 """ 1375 Looks up or creates a file node named 'name' relative to 1376 this directory. 1377 """ 1378 return self.fs.File(name, self)
1379
1380 - def _lookup_rel(self, name, klass, create=1):
1381 """ 1382 Looks up a *normalized* relative path name, relative to this 1383 directory. 1384 1385 This method is intended for use by internal lookups with 1386 already-normalized path data. For general-purpose lookups, 1387 use the Entry(), Dir() and File() methods above. 1388 1389 This method does *no* input checking and will die or give 1390 incorrect results if it's passed a non-normalized path name (e.g., 1391 a path containing '..'), an absolute path name, a top-relative 1392 ('#foo') path name, or any kind of object. 1393 """ 1394 name = self.entry_labspath(name) 1395 return self.root._lookup_abs(name, klass, create)
1396 1404
1405 - def getRepositories(self):
1406 """Returns a list of repositories for this directory. 1407 """ 1408 if self.srcdir and not self.duplicate: 1409 return self.srcdir.get_all_rdirs() + self.repositories 1410 return self.repositories
1411 1412 memoizer_counters.append(SCons.Memoize.CountValue('get_all_rdirs')) 1413
1414 - def get_all_rdirs(self):
1415 try: 1416 return list(self._memo['get_all_rdirs']) 1417 except KeyError: 1418 pass 1419 1420 result = [self] 1421 fname = '.' 1422 dir = self 1423 while dir: 1424 for rep in dir.getRepositories(): 1425 result.append(rep.Dir(fname)) 1426 if fname == '.': 1427 fname = dir.name 1428 else: 1429 fname = dir.name + os.sep + fname 1430 dir = dir.up() 1431 1432 self._memo['get_all_rdirs'] = list(result) 1433 1434 return result
1435
1436 - def addRepository(self, dir):
1437 if dir != self and not dir in self.repositories: 1438 self.repositories.append(dir) 1439 dir.tpath = '.' 1440 self.__clearRepositoryCache()
1441
1442 - def up(self):
1443 return self.entries['..']
1444
1445 - def _rel_path_key(self, other):
1446 return str(other)
1447 1448 memoizer_counters.append(SCons.Memoize.CountDict('rel_path', _rel_path_key)) 1449
1450 - def rel_path(self, other):
1451 """Return a path to "other" relative to this directory. 1452 """ 1453 1454 # This complicated and expensive method, which constructs relative 1455 # paths between arbitrary Node.FS objects, is no longer used 1456 # by SCons itself. It was introduced to store dependency paths 1457 # in .sconsign files relative to the target, but that ended up 1458 # being significantly inefficient. 1459 # 1460 # We're continuing to support the method because some SConstruct 1461 # files out there started using it when it was available, and 1462 # we're all about backwards compatibility.. 1463 1464 try: 1465 memo_dict = self._memo['rel_path'] 1466 except KeyError: 1467 memo_dict = {} 1468 self._memo['rel_path'] = memo_dict 1469 else: 1470 try: 1471 return memo_dict[other] 1472 except KeyError: 1473 pass 1474 1475 if self is other: 1476 1477 result = '.' 1478 1479 elif not other in self.path_elements: 1480 1481 try: 1482 other_dir = other.get_dir() 1483 except AttributeError: 1484 result = str(other) 1485 else: 1486 if other_dir is None: 1487 result = other.name 1488 else: 1489 dir_rel_path = self.rel_path(other_dir) 1490 if dir_rel_path == '.': 1491 result = other.name 1492 else: 1493 result = dir_rel_path + os.sep + other.name 1494 1495 else: 1496 1497 i = self.path_elements.index(other) + 1 1498 1499 path_elems = ['..'] * (len(self.path_elements) - i) \ 1500 + map(lambda n: n.name, other.path_elements[i:]) 1501 1502 result = string.join(path_elems, os.sep) 1503 1504 memo_dict[other] = result 1505 1506 return result
1507
1508 - def get_env_scanner(self, env, kw={}):
1509 import SCons.Defaults 1510 return SCons.Defaults.DirEntryScanner
1511
1512 - def get_target_scanner(self):
1513 import SCons.Defaults 1514 return SCons.Defaults.DirEntryScanner
1515
1516 - def get_found_includes(self, env, scanner, path):
1517 """Return this directory's implicit dependencies. 1518 1519 We don't bother caching the results because the scan typically 1520 shouldn't be requested more than once (as opposed to scanning 1521 .h file contents, which can be requested as many times as the 1522 files is #included by other files). 1523 """ 1524 if not scanner: 1525 return [] 1526 # Clear cached info for this Dir. If we already visited this 1527 # directory on our walk down the tree (because we didn't know at 1528 # that point it was being used as the source for another Node) 1529 # then we may have calculated build signature before realizing 1530 # we had to scan the disk. Now that we have to, though, we need 1531 # to invalidate the old calculated signature so that any node 1532 # dependent on our directory structure gets one that includes 1533 # info about everything on disk. 1534 self.clear() 1535 return scanner(self, env, path)
1536 1537 # 1538 # Taskmaster interface subsystem 1539 # 1540
1541 - def prepare(self):
1542 pass
1543
1544 - def build(self, **kw):
1545 """A null "builder" for directories.""" 1546 global MkdirBuilder 1547 if not self.builder is MkdirBuilder: 1548 apply(SCons.Node.Node.build, [self,], kw)
1549 1550 # 1551 # 1552 # 1553
1554 - def _create(self):
1555 """Create this directory, silently and without worrying about 1556 whether the builder is the default or not.""" 1557 listDirs = [] 1558 parent = self 1559 while parent: 1560 if parent.exists(): 1561 break 1562 listDirs.append(parent) 1563 p = parent.up() 1564 if p is None: 1565 raise SCons.Errors.StopError, parent.path 1566 parent = p 1567 listDirs.reverse() 1568 for dirnode in listDirs: 1569 try: 1570 # Don't call dirnode.build(), call the base Node method 1571 # directly because we definitely *must* create this 1572 # directory. The dirnode.build() method will suppress 1573 # the build if it's the default builder. 1574 SCons.Node.Node.build(dirnode) 1575 dirnode.get_executor().nullify() 1576 # The build() action may or may not have actually 1577 # created the directory, depending on whether the -n 1578 # option was used or not. Delete the _exists and 1579 # _rexists attributes so they can be reevaluated. 1580 dirnode.clear() 1581 except OSError: 1582 pass
1583
1585 global MkdirBuilder 1586 return not self.builder is MkdirBuilder and self.has_builder()
1587
1588 - def alter_targets(self):
1589 """Return any corresponding targets in a variant directory. 1590 """ 1591 return self.fs.variant_dir_target_climb(self, self, [])
1592
1593 - def scanner_key(self):
1594 """A directory does not get scanned.""" 1595 return None
1596
1597 - def get_contents(self):
1598 """Return content signatures and names of all our children 1599 separated by new-lines. Ensure that the nodes are sorted.""" 1600 contents = [] 1601 name_cmp = lambda a, b: cmp(a.name, b.name) 1602 sorted_children = self.children()[:] 1603 sorted_children.sort(name_cmp) 1604 for node in sorted_children: 1605 contents.append('%s %s\n' % (node.get_csig(), node.name)) 1606 return string.join(contents, '')
1607
1608 - def get_csig(self):
1609 """Compute the content signature for Directory nodes. In 1610 general, this is not needed and the content signature is not 1611 stored in the DirNodeInfo. However, if get_contents on a Dir 1612 node is called which has a child directory, the child 1613 directory should return the hash of its contents.""" 1614 contents = self.get_contents() 1615 return SCons.Util.MD5signature(contents)
1616
1617 - def do_duplicate(self, src):
1618 pass
1619 1620 changed_since_last_build = SCons.Node.Node.state_has_changed 1621
1622 - def is_up_to_date(self):
1623 """If any child is not up-to-date, then this directory isn't, 1624 either.""" 1625 if not self.builder is MkdirBuilder and not self.exists(): 1626 return 0 1627 up_to_date = SCons.Node.up_to_date 1628 for kid in self.children(): 1629 if kid.get_state() > up_to_date: 1630 return 0 1631 return 1
1632
1633 - def rdir(self):
1634 if not self.exists(): 1635 norm_name = _my_normcase(self.name) 1636 for dir in self.dir.get_all_rdirs(): 1637 try: node = dir.entries[norm_name] 1638 except KeyError: node = dir.dir_on_disk(self.name) 1639 if node and node.exists() and \ 1640 (isinstance(dir, Dir) or isinstance(dir, Entry)): 1641 return node 1642 return self
1643
1644 - def sconsign(self):
1645 """Return the .sconsign file info for this directory, 1646 creating it first if necessary.""" 1647 if not self._sconsign: 1648 import SCons.SConsign 1649 self._sconsign = SCons.SConsign.ForDirectory(self) 1650 return self._sconsign
1651
1652 - def srcnode(self):
1653 """Dir has a special need for srcnode()...if we 1654 have a srcdir attribute set, then that *is* our srcnode.""" 1655 if self.srcdir: 1656 return self.srcdir 1657 return Base.srcnode(self)
1658
1659 - def get_timestamp(self):
1660 """Return the latest timestamp from among our children""" 1661 stamp = 0 1662 for kid in self.children(): 1663 if kid.get_timestamp() > stamp: 1664 stamp = kid.get_timestamp() 1665 return stamp
1666
1667 - def entry_abspath(self, name):
1668 return self.abspath + os.sep + name
1669
1670 - def entry_labspath(self, name):
1671 return self.labspath + '/' + name
1672
1673 - def entry_path(self, name):
1674 return self.path + os.sep + name
1675
1676 - def entry_tpath(self, name):
1677 return self.tpath + os.sep + name
1678
1679 - def entry_exists_on_disk(self, name):
1680 try: 1681 d = self.on_disk_entries 1682 except AttributeError: 1683 d = {} 1684 try: 1685 entries = os.listdir(self.abspath) 1686 except OSError: 1687 pass 1688 else: 1689 for entry in map(_my_normcase, entries): 1690 d[entry] = 1 1691 self.on_disk_entries = d 1692 return d.has_key(_my_normcase(name))
1693 1694 memoizer_counters.append(SCons.Memoize.CountValue('srcdir_list')) 1695
1696 - def srcdir_list(self):
1697 try: 1698 return self._memo['srcdir_list'] 1699 except KeyError: 1700 pass 1701 1702 result = [] 1703 1704 dirname = '.' 1705 dir = self 1706 while dir: 1707 if dir.srcdir: 1708 result.append(dir.srcdir.Dir(dirname)) 1709 dirname = dir.name + os.sep + dirname 1710 dir = dir.up() 1711 1712 self._memo['srcdir_list'] = result 1713 1714 return result
1715
1716 - def srcdir_duplicate(self, name):
1717 for dir in self.srcdir_list(): 1718 if self.is_under(dir): 1719 # We shouldn't source from something in the build path; 1720 # variant_dir is probably under src_dir, in which case 1721 # we are reflecting. 1722 break 1723 if dir.entry_exists_on_disk(name): 1724 srcnode = dir.Entry(name).disambiguate() 1725 if self.duplicate: 1726 node = self.Entry(name).disambiguate() 1727 node.do_duplicate(srcnode) 1728 return node 1729 else: 1730 return srcnode 1731 return None
1732
1733 - def _srcdir_find_file_key(self, filename):
1734 return filename
1735 1736 memoizer_counters.append(SCons.Memoize.CountDict('srcdir_find_file', _srcdir_find_file_key)) 1737
1738 - def srcdir_find_file(self, filename):
1739 try: 1740 memo_dict = self._memo['srcdir_find_file'] 1741 except KeyError: 1742 memo_dict = {} 1743 self._memo['srcdir_find_file'] = memo_dict 1744 else: 1745 try: 1746 return memo_dict[filename] 1747 except KeyError: 1748 pass 1749 1750 def func(node): 1751 if (isinstance(node, File) or isinstance(node, Entry)) and \ 1752 (node.is_derived() or node.exists()): 1753 return node 1754 return None
1755 1756 norm_name = _my_normcase(filename) 1757 1758 for rdir in self.get_all_rdirs(): 1759 try: node = rdir.entries[norm_name] 1760 except KeyError: node = rdir.file_on_disk(filename) 1761 else: node = func(node) 1762 if node: 1763 result = (node, self) 1764 memo_dict[filename] = result 1765 return result 1766 1767 for srcdir in self.srcdir_list(): 1768 for rdir in srcdir.get_all_rdirs(): 1769 try: node = rdir.entries[norm_name] 1770 except KeyError: node = rdir.file_on_disk(filename) 1771 else: node = func(node) 1772 if node: 1773 result = (File(filename, self, self.fs), srcdir) 1774 memo_dict[filename] = result 1775 return result 1776 1777 result = (None, None) 1778 memo_dict[filename] = result 1779 return result
1780
1781 - def dir_on_disk(self, name):
1782 if self.entry_exists_on_disk(name): 1783 try: return self.Dir(name) 1784 except TypeError: pass 1785 node = self.srcdir_duplicate(name) 1786 if isinstance(node, File): 1787 return None 1788 return node
1789
1790 - def file_on_disk(self, name):
1791 if self.entry_exists_on_disk(name) or \ 1792 diskcheck_rcs(self, name) or \ 1793 diskcheck_sccs(self, name): 1794 try: return self.File(name) 1795 except TypeError: pass 1796 node = self.srcdir_duplicate(name) 1797 if isinstance(node, Dir): 1798 node = None 1799 return node
1800
1801 - def walk(self, func, arg):
1802 """ 1803 Walk this directory tree by calling the specified function 1804 for each directory in the tree. 1805 1806 This behaves like the os.path.walk() function, but for in-memory 1807 Node.FS.Dir objects. The function takes the same arguments as 1808 the functions passed to os.path.walk(): 1809 1810 func(arg, dirname, fnames) 1811 1812 Except that "dirname" will actually be the directory *Node*, 1813 not the string. The '.' and '..' entries are excluded from 1814 fnames. The fnames list may be modified in-place to filter the 1815 subdirectories visited or otherwise impose a specific order. 1816 The "arg" argument is always passed to func() and may be used 1817 in any way (or ignored, passing None is common). 1818 """ 1819 entries = self.entries 1820 names = entries.keys() 1821 names.remove('.') 1822 names.remove('..') 1823 func(arg, self, names) 1824 select_dirs = lambda n, e=entries: isinstance(e[n], Dir) 1825 for dirname in filter(select_dirs, names): 1826 entries[dirname].walk(func, arg)
1827
1828 - def glob(self, pathname, ondisk=True, source=False, strings=False):
1829 """ 1830 Returns a list of Nodes (or strings) matching a specified 1831 pathname pattern. 1832 1833 Pathname patterns follow UNIX shell semantics: * matches 1834 any-length strings of any characters, ? matches any character, 1835 and [] can enclose lists or ranges of characters. Matches do 1836 not span directory separators. 1837 1838 The matches take into account Repositories, returning local 1839 Nodes if a corresponding entry exists in a Repository (either 1840 an in-memory Node or something on disk). 1841 1842 By defafult, the glob() function matches entries that exist 1843 on-disk, in addition to in-memory Nodes. Setting the "ondisk" 1844 argument to False (or some other non-true value) causes the glob() 1845 function to only match in-memory Nodes. The default behavior is 1846 to return both the on-disk and in-memory Nodes. 1847 1848 The "source" argument, when true, specifies that corresponding 1849 source Nodes must be returned if you're globbing in a build 1850 directory (initialized with VariantDir()). The default behavior 1851 is to return Nodes local to the VariantDir(). 1852 1853 The "strings" argument, when true, returns the matches as strings, 1854 not Nodes. The strings are path names relative to this directory. 1855 1856 The underlying algorithm is adapted from the glob.glob() function 1857 in the Python library (but heavily modified), and uses fnmatch() 1858 under the covers. 1859 """ 1860 dirname, basename = os.path.split(pathname) 1861 if not dirname: 1862 return self._glob1(basename, ondisk, source, strings) 1863 if has_glob_magic(dirname): 1864 list = self.glob(dirname, ondisk, source, strings=False) 1865 else: 1866 list = [self.Dir(dirname, create=True)] 1867 result = [] 1868 for dir in list: 1869 r = dir._glob1(basename, ondisk, source, strings) 1870 if strings: 1871 r = map(lambda x, d=str(dir): os.path.join(d, x), r) 1872 result.extend(r) 1873 result.sort(lambda a, b: cmp(str(a), str(b))) 1874 return result
1875
1876 - def _glob1(self, pattern, ondisk=True, source=False, strings=False):
1877 """ 1878 Globs for and returns a list of entry names matching a single 1879 pattern in this directory. 1880 1881 This searches any repositories and source directories for 1882 corresponding entries and returns a Node (or string) relative 1883 to the current directory if an entry is found anywhere. 1884 1885 TODO: handle pattern with no wildcard 1886 """ 1887 search_dir_list = self.get_all_rdirs() 1888 for srcdir in self.srcdir_list(): 1889 search_dir_list.extend(srcdir.get_all_rdirs()) 1890 1891 names = [] 1892 for dir in search_dir_list: 1893 # We use the .name attribute from the Node because the keys of 1894 # the dir.entries dictionary are normalized (that is, all upper 1895 # case) on case-insensitive systems like Windows. 1896 #node_names = [ v.name for k, v in dir.entries.items() if k not in ('.', '..') ] 1897 entry_names = filter(lambda n: n not in ('.', '..'), dir.entries.keys()) 1898 node_names = map(lambda n, e=dir.entries: e[n].name, entry_names) 1899 names.extend(node_names) 1900 if ondisk: 1901 try: 1902 disk_names = os.listdir(dir.abspath) 1903 except os.error: 1904 pass 1905 else: 1906 names.extend(disk_names) 1907 if not strings: 1908 # We're going to return corresponding Nodes in 1909 # the local directory, so we need to make sure 1910 # those Nodes exist. We only want to create 1911 # Nodes for the entries that will match the 1912 # specified pattern, though, which means we 1913 # need to filter the list here, even though 1914 # the overall list will also be filtered later, 1915 # after we exit this loop. 1916 if pattern[0] != '.': 1917 #disk_names = [ d for d in disk_names if d[0] != '.' ] 1918 disk_names = filter(lambda x: x[0] != '.', disk_names) 1919 disk_names = fnmatch.filter(disk_names, pattern) 1920 rep_nodes = map(dir.Entry, disk_names) 1921 #rep_nodes = [ n.disambiguate() for n in rep_nodes ] 1922 rep_nodes = map(lambda n: n.disambiguate(), rep_nodes) 1923 for node, name in izip(rep_nodes, disk_names): 1924 n = self.Entry(name) 1925 if n.__class__ != node.__class__: 1926 n.__class__ = node.__class__ 1927 n._morph() 1928 1929 names = set(names) 1930 if pattern[0] != '.': 1931 #names = [ n for n in names if n[0] != '.' ] 1932 names = filter(lambda x: x[0] != '.', names) 1933 names = fnmatch.filter(names, pattern) 1934 1935 if strings: 1936 return names 1937 1938 #return [ self.entries[_my_normcase(n)] for n in names ] 1939 return map(lambda n, e=self.entries: e[_my_normcase(n)], names)
1940
1941 -class RootDir(Dir):
1942 """A class for the root directory of a file system. 1943 1944 This is the same as a Dir class, except that the path separator 1945 ('/' or '\\') is actually part of the name, so we don't need to 1946 add a separator when creating the path names of entries within 1947 this directory. 1948 """
1949 - def __init__(self, name, fs):
        if __debug__: logInstanceCreation(self, 'Node.FS.RootDir')
        # We're going to be our own parent directory (".." entry and .dir
        # attribute) so we have to set up some values so Base.__init__()
        # won't gag when it calls some of our methods.
        self.abspath = ''
        self.labspath = ''
        self.path = ''
        self.tpath = ''
        self.path_elements = []
        self.duplicate = 0
        self.root = self
        Base.__init__(self, name, self, fs)

        # Now set our paths to what we really want them to be: the
        # initial drive letter (the name) plus the directory separator,
        # except for the "lookup abspath," which does not have the
        # drive letter.
        self.abspath = name + os.sep
        self.labspath = ''
        self.path = name + os.sep
        self.tpath = name + os.sep
        self._morph()

        self._lookupDict = {}

        # The // and os.sep + os.sep entries are necessary because
        # os.path.normpath() seems to preserve double slashes at the
        # beginning of a path (presumably for UNC path names), but
        # collapses triple slashes to a single slash.
        self._lookupDict[''] = self
        self._lookupDict['/'] = self
        self._lookupDict['//'] = self
        self._lookupDict[os.sep] = self
        self._lookupDict[os.sep + os.sep] = self
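The comment above about os.path.normpath() can be checked directly: on POSIX-style paths a doubled leading slash is preserved (it may denote a UNC-like root) while three or more collapse to one, which is why both '/' and '//' need their own entries in _lookupDict. A quick illustration using posixpath so the behavior is platform-independent:

    import posixpath
    assert posixpath.normpath('//host/share') == '//host/share'   # double slash preserved
    assert posixpath.normpath('///host/share') == '/host/share'   # triple slash collapsed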
1984
1985 - def must_be_same(self, klass):
1986 if klass is Dir: 1987 return 1988 Base.must_be_same(self, klass)
1989
1990 - def _lookup_abs(self, p, klass, create=1):
1991 """ 1992 Fast (?) lookup of a *normalized* absolute path. 1993 1994 This method is intended for use by internal lookups with 1995 already-normalized path data. For general-purpose lookups, 1996 use the FS.Entry(), FS.Dir() or FS.File() methods. 1997 1998 The caller is responsible for making sure we're passed a 1999 normalized absolute path; we merely let Python's dictionary look 2000 up and return the One True Node.FS object for the path. 2001 2002 If no Node for the specified "p" doesn't already exist, and 2003 "create" is specified, the Node may be created after recursive 2004 invocation to find or create the parent directory or directories. 2005 """ 2006 k = _my_normcase(p) 2007 try: 2008 result = self._lookupDict[k] 2009 except KeyError: 2010 if not create: 2011 raise SCons.Errors.UserError 2012 # There is no Node for this path name, and we're allowed 2013 # to create it. 2014 dir_name, file_name = os.path.split(p) 2015 dir_node = self._lookup_abs(dir_name, Dir) 2016 result = klass(file_name, dir_node, self.fs) 2017 2018 # Double-check on disk (as configured) that the Node we 2019 # created matches whatever is out there in the real world. 2020 result.diskcheck_match() 2021 2022 self._lookupDict[k] = result 2023 dir_node.entries[_my_normcase(file_name)] = result 2024 dir_node.implicit = None 2025 else: 2026 # There is already a Node for this path name. Allow it to 2027 # complain if we were looking for an inappropriate type. 2028 result.must_be_same(klass) 2029 return result
2030
2031 - def __str__(self):
2032 return self.abspath
2033
2034 - def entry_abspath(self, name):
2035 return self.abspath + name
2036
2037 - def entry_labspath(self, name):
2038 return '/' + name
2039
2040 - def entry_path(self, name):
2041 return self.path + name
2042
2043 - def entry_tpath(self, name):
2044 return self.tpath + name
2045
2046 - def is_under(self, dir):
2047 if self is dir: 2048 return 1 2049 else: 2050 return 0
2051
2052 - def up(self):
2053 return None
2054
2055 - def get_dir(self):
2056 return None
2057
2058 - def src_builder(self):
2059 return _null
2060
2061 -class FileNodeInfo(SCons.Node.NodeInfoBase):
    current_version_id = 1

    field_list = ['csig', 'timestamp', 'size']

    # This should get reset by the FS initialization.
    fs = None
2069 - def str_to_node(self, s):
        top = self.fs.Top
        root = top.root
        if do_splitdrive:
            drive, s = os.path.splitdrive(s)
            if drive:
                root = self.fs.get_root(drive)
        if not os.path.isabs(s):
            s = top.labspath + '/' + s
        return root._lookup_abs(s, Entry)
2079
2080 -class FileBuildInfo(SCons.Node.BuildInfoBase):
2081 current_version_id = 1 2082
2083 - def convert_to_sconsign(self):
2084 """ 2085 Converts this FileBuildInfo object for writing to a .sconsign file 2086 2087 This replaces each Node in our various dependency lists with its 2088 usual string representation: relative to the top-level SConstruct 2089 directory, or an absolute path if it's outside. 2090 """ 2091 if os.sep == '/': 2092 node_to_str = str 2093 else: 2094 def node_to_str(n): 2095 try: 2096 s = n.path 2097 except AttributeError: 2098 s = str(n) 2099 else: 2100 s = string.replace(s, os.sep, '/') 2101 return s
2102 for attr in ['bsources', 'bdepends', 'bimplicit']: 2103 try: 2104 val = getattr(self, attr) 2105 except AttributeError: 2106 pass 2107 else: 2108 setattr(self, attr, map(node_to_str, val))
2109 - def convert_from_sconsign(self, dir, name):
2110 """ 2111 Converts a newly-read FileBuildInfo object for in-SCons use 2112 2113 For normal up-to-date checking, we don't have any conversion to 2114 perform--but we're leaving this method here to make that clear. 2115 """ 2116 pass
2117 - def prepare_dependencies(self):
2118 """ 2119 Prepares a FileBuildInfo object for explaining what changed 2120 2121 The bsources, bdepends and bimplicit lists have all been 2122 stored on disk as paths relative to the top-level SConstruct 2123 directory. Convert the strings to actual Nodes (for use by the 2124 --debug=explain code and --implicit-cache). 2125 """ 2126 attrs = [ 2127 ('bsources', 'bsourcesigs'), 2128 ('bdepends', 'bdependsigs'), 2129 ('bimplicit', 'bimplicitsigs'), 2130 ] 2131 for (nattr, sattr) in attrs: 2132 try: 2133 strings = getattr(self, nattr) 2134 nodeinfos = getattr(self, sattr) 2135 except AttributeError: 2136 pass 2137 else: 2138 nodes = [] 2139 for s, ni in izip(strings, nodeinfos): 2140 if not isinstance(s, SCons.Node.Node): 2141 s = ni.str_to_node(s) 2142 nodes.append(s) 2143 setattr(self, nattr, nodes)
2144 - def format(self, names=0):
2145 result = [] 2146 bkids = self.bsources + self.bdepends + self.bimplicit 2147 bkidsigs = self.bsourcesigs + self.bdependsigs + self.bimplicitsigs 2148 for bkid, bkidsig in izip(bkids, bkidsigs): 2149 result.append(str(bkid) + ': ' + 2150 string.join(bkidsig.format(names=names), ' ')) 2151 result.append('%s [%s]' % (self.bactsig, self.bact)) 2152 return string.join(result, '\n')
2153
2154 -class File(Base):
2155 """A class for files in a file system. 2156 """ 2157 2158 memoizer_counters = [] 2159 2160 NodeInfo = FileNodeInfo 2161 BuildInfo = FileBuildInfo 2162 2163 md5_chunksize = 64 2164
2165 - def diskcheck_match(self):
2166 diskcheck_match(self, self.isdir, 2167 "Directory %s found where file expected.")
2168
2169 - def __init__(self, name, directory, fs):
2170 if __debug__: logInstanceCreation(self, 'Node.FS.File') 2171 Base.__init__(self, name, directory, fs) 2172 self._morph()
2173
2174 - def Entry(self, name):
2175 """Create an entry node named 'name' relative to 2176 the SConscript directory of this file.""" 2177 cwd = self.cwd or self.fs._cwd 2178 return cwd.Entry(name)
2179
2180 - def Dir(self, name, create=True):
2181 """Create a directory node named 'name' relative to 2182 the SConscript directory of this file.""" 2183 cwd = self.cwd or self.fs._cwd 2184 return cwd.Dir(name, create)
2185
2186 - def Dirs(self, pathlist):
2187 """Create a list of directories relative to the SConscript 2188 directory of this file.""" 2189 return map(lambda p, s=self: s.Dir(p), pathlist)
2190
2191 - def File(self, name):
2192 """Create a file node named 'name' relative to 2193 the SConscript directory of this file.""" 2194 cwd = self.cwd or self.fs._cwd 2195 return cwd.File(name)
2196 2197 #def generate_build_dict(self): 2198 # """Return an appropriate dictionary of values for building 2199 # this File.""" 2200 # return {'Dir' : self.Dir, 2201 # 'File' : self.File, 2202 # 'RDirs' : self.RDirs} 2203
2204 - def _morph(self):
2205 """Turn a file system node into a File object.""" 2206 self.scanner_paths = {} 2207 if not hasattr(self, '_local'): 2208 self._local = 0 2209 2210 # If there was already a Builder set on this entry, then 2211 # we need to make sure we call the target-decider function, 2212 # not the source-decider. Reaching in and doing this by hand 2213 # is a little bogus. We'd prefer to handle this by adding 2214 # an Entry.builder_set() method that disambiguates like the 2215 # other methods, but that starts running into problems with the 2216 # fragile way we initialize Dir Nodes with their Mkdir builders, 2217 # yet still allow them to be overridden by the user. Since it's 2218 # not clear right now how to fix that, stick with what works 2219 # until it becomes clear... 2220 if self.has_builder(): 2221 self.changed_since_last_build = self.decide_target
2222
2223 - def scanner_key(self):
2224 return self.get_suffix()
2225
2226 - def get_contents(self):
        if not self.rexists():
            return ''
        fname = self.rfile().abspath
        try:
            r = open(fname, "rb").read()
        except EnvironmentError, e:
            if not e.filename:
                e.filename = fname
            raise
        return r
2237
2238 - def get_content_hash(self):
2239 """ 2240 Compute and return the MD5 hash for this file. 2241 """ 2242 if not self.rexists(): 2243 return SCons.Util.MD5signature('') 2244 fname = self.rfile().abspath 2245 try: 2246 cs = SCons.Util.MD5filesignature(fname, 2247 chunksize=SCons.Node.FS.File.md5_chunksize*1024) 2248 except EnvironmentError, e: 2249 if not e.filename: 2250 e.filename = fname 2251 raise 2252 return cs
2253 2254 2255 memoizer_counters.append(SCons.Memoize.CountValue('get_size')) 2256
2257 - def get_size(self):
2258 try: 2259 return self._memo['get_size'] 2260 except KeyError: 2261 pass 2262 2263 if self.rexists(): 2264 size = self.rfile().getsize() 2265 else: 2266 size = 0 2267 2268 self._memo['get_size'] = size 2269 2270 return size
2271 2272 memoizer_counters.append(SCons.Memoize.CountValue('get_timestamp')) 2273
2274 - def get_timestamp(self):
2275 try: 2276 return self._memo['get_timestamp'] 2277 except KeyError: 2278 pass 2279 2280 if self.rexists(): 2281 timestamp = self.rfile().getmtime() 2282 else: 2283 timestamp = 0 2284 2285 self._memo['get_timestamp'] = timestamp 2286 2287 return timestamp
2288
2289 - def store_info(self):
        # Merge our build information into the already-stored entry.
        # This accommodates "chained builds" where a file that's a target
        # in one build (SConstruct file) is a source in a different build.
        # See test/chained-build.py for the use case.
        if do_store_info:
            self.dir.sconsign().store_info(self.name, self)
2296 2297 convert_copy_attrs = [ 2298 'bsources', 2299 'bimplicit', 2300 'bdepends', 2301 'bact', 2302 'bactsig', 2303 'ninfo', 2304 ] 2305 2306 2307 convert_sig_attrs = [ 2308 'bsourcesigs', 2309 'bimplicitsigs', 2310 'bdependsigs', 2311 ] 2312
2313 - def convert_old_entry(self, old_entry):
2314 # Convert a .sconsign entry from before the Big Signature 2315 # Refactoring, doing what we can to convert its information 2316 # to the new .sconsign entry format. 2317 # 2318 # The old format looked essentially like this: 2319 # 2320 # BuildInfo 2321 # .ninfo (NodeInfo) 2322 # .bsig 2323 # .csig 2324 # .timestamp 2325 # .size 2326 # .bsources 2327 # .bsourcesigs ("signature" list) 2328 # .bdepends 2329 # .bdependsigs ("signature" list) 2330 # .bimplicit 2331 # .bimplicitsigs ("signature" list) 2332 # .bact 2333 # .bactsig 2334 # 2335 # The new format looks like this: 2336 # 2337 # .ninfo (NodeInfo) 2338 # .bsig 2339 # .csig 2340 # .timestamp 2341 # .size 2342 # .binfo (BuildInfo) 2343 # .bsources 2344 # .bsourcesigs (NodeInfo list) 2345 # .bsig 2346 # .csig 2347 # .timestamp 2348 # .size 2349 # .bdepends 2350 # .bdependsigs (NodeInfo list) 2351 # .bsig 2352 # .csig 2353 # .timestamp 2354 # .size 2355 # .bimplicit 2356 # .bimplicitsigs (NodeInfo list) 2357 # .bsig 2358 # .csig 2359 # .timestamp 2360 # .size 2361 # .bact 2362 # .bactsig 2363 # 2364 # The basic idea of the new structure is that a NodeInfo always 2365 # holds all available information about the state of a given Node 2366 # at a certain point in time. The various .b*sigs lists can just 2367 # be a list of pointers to the .ninfo attributes of the different 2368 # dependent nodes, without any copying of information until it's 2369 # time to pickle it for writing out to a .sconsign file. 2370 # 2371 # The complicating issue is that the *old* format only stored one 2372 # "signature" per dependency, based on however the *last* build 2373 # was configured. We don't know from just looking at it whether 2374 # it was a build signature, a content signature, or a timestamp 2375 # "signature". Since we no longer use build signatures, the 2376 # best we can do is look at the length and if it's thirty two, 2377 # assume that it was (or might have been) a content signature. 2378 # If it was actually a build signature, then it will cause a 2379 # rebuild anyway when it doesn't match the new content signature, 2380 # but that's probably the best we can do. 2381 import SCons.SConsign 2382 new_entry = SCons.SConsign.SConsignEntry() 2383 new_entry.binfo = self.new_binfo() 2384 binfo = new_entry.binfo 2385 for attr in self.convert_copy_attrs: 2386 try: 2387 value = getattr(old_entry, attr) 2388 except AttributeError: 2389 pass 2390 else: 2391 setattr(binfo, attr, value) 2392 delattr(old_entry, attr) 2393 for attr in self.convert_sig_attrs: 2394 try: 2395 sig_list = getattr(old_entry, attr) 2396 except AttributeError: 2397 pass 2398 else: 2399 value = [] 2400 for sig in sig_list: 2401 ninfo = self.new_ninfo() 2402 if len(sig) == 32: 2403 ninfo.csig = sig 2404 else: 2405 ninfo.timestamp = sig 2406 value.append(ninfo) 2407 setattr(binfo, attr, value) 2408 delattr(old_entry, attr) 2409 return new_entry
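The conversion above leans on one heuristic: an old-format "signature" that is exactly thirty-two characters long is treated as an MD5 content signature, and anything else as a timestamp. A small stand-alone sketch of that test (the function name is hypothetical):

    def classify_old_signature(sig):
        # 32 characters is the length of an MD5 hex digest, so assume csig.
        if isinstance(sig, str) and len(sig) == 32:
            return 'csig'
        return 'timestamp'

    # classify_old_signature('d41d8cd98f00b204e9800998ecf8427e')  -> 'csig'
    # classify_old_signature(1223617605)                          -> 'timestamp'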
2410 2411 memoizer_counters.append(SCons.Memoize.CountValue('get_stored_info')) 2412
2413 - def get_stored_info(self):
2414 try: 2415 return self._memo['get_stored_info'] 2416 except KeyError: 2417 pass 2418 2419 try: 2420 sconsign_entry = self.dir.sconsign().get_entry(self.name) 2421 except (KeyError, EnvironmentError): 2422 import SCons.SConsign 2423 sconsign_entry = SCons.SConsign.SConsignEntry() 2424 sconsign_entry.binfo = self.new_binfo() 2425 sconsign_entry.ninfo = self.new_ninfo() 2426 else: 2427 if isinstance(sconsign_entry, FileBuildInfo): 2428 # This is a .sconsign file from before the Big Signature 2429 # Refactoring; convert it as best we can. 2430 sconsign_entry = self.convert_old_entry(sconsign_entry) 2431 try: 2432 delattr(sconsign_entry.ninfo, 'bsig') 2433 except AttributeError: 2434 pass 2435 2436 self._memo['get_stored_info'] = sconsign_entry 2437 2438 return sconsign_entry
2439
2440 - def get_stored_implicit(self):
2441 binfo = self.get_stored_info().binfo 2442 binfo.prepare_dependencies() 2443 try: return binfo.bimplicit 2444 except AttributeError: return None
2445
2446 - def rel_path(self, other):
2447 return self.dir.rel_path(other)
2448
2449 - def _get_found_includes_key(self, env, scanner, path):
2450 return (id(env), id(scanner), path)
2451 2452 memoizer_counters.append(SCons.Memoize.CountDict('get_found_includes', _get_found_includes_key)) 2453
2454 - def get_found_includes(self, env, scanner, path):
2455 """Return the included implicit dependencies in this file. 2456 Cache results so we only scan the file once per path 2457 regardless of how many times this information is requested. 2458 """ 2459 memo_key = (id(env), id(scanner), path) 2460 try: 2461 memo_dict = self._memo['get_found_includes'] 2462 except KeyError: 2463 memo_dict = {} 2464 self._memo['get_found_includes'] = memo_dict 2465 else: 2466 try: 2467 return memo_dict[memo_key] 2468 except KeyError: 2469 pass 2470 2471 if scanner: 2472 result = scanner(self, env, path) 2473 result = map(lambda N: N.disambiguate(), result) 2474 else: 2475 result = [] 2476 2477 memo_dict[memo_key] = result 2478 2479 return result
2480
2481 - def _createDir(self):
2482 # ensure that the directories for this node are 2483 # created. 2484 self.dir._create()
2485
2486 - def retrieve_from_cache(self):
2487 """Try to retrieve the node's content from a cache 2488 2489 This method is called from multiple threads in a parallel build, 2490 so only do thread safe stuff here. Do thread unsafe stuff in 2491 built(). 2492 2493 Returns true iff the node was successfully retrieved. 2494 """ 2495 if self.nocache: 2496 return None 2497 if not self.is_derived(): 2498 return None 2499 return self.get_build_env().get_CacheDir().retrieve(self)
2500
2501 - def built(self):
2502 """ 2503 Called just after this node is successfully built. 2504 """ 2505 # Push this file out to cache before the superclass Node.built() 2506 # method has a chance to clear the build signature, which it 2507 # will do if this file has a source scanner. 2508 # 2509 # We have to clear the memoized values *before* we push it to 2510 # cache so that the memoization of the self.exists() return 2511 # value doesn't interfere. 2512 self.clear_memoized_values() 2513 if self.exists(): 2514 self.get_build_env().get_CacheDir().push(self) 2515 SCons.Node.Node.built(self)
2516
2517 - def visited(self):
2518 if self.exists(): 2519 self.get_build_env().get_CacheDir().push_if_forced(self) 2520 2521 ninfo = self.get_ninfo() 2522 2523 csig = self.get_max_drift_csig() 2524 if csig: 2525 ninfo.csig = csig 2526 2527 ninfo.timestamp = self.get_timestamp() 2528 ninfo.size = self.get_size() 2529 2530 if not self.has_builder(): 2531 # This is a source file, but it might have been a target file 2532 # in another build that included more of the DAG. Copy 2533 # any build information that's stored in the .sconsign file 2534 # into our binfo object so it doesn't get lost. 2535 old = self.get_stored_info() 2536 self.get_binfo().__dict__.update(old.binfo.__dict__) 2537 2538 self.store_info()
2539
2540 - def find_src_builder(self):
2541 if self.rexists(): 2542 return None 2543 scb = self.dir.src_builder() 2544 if scb is _null: 2545 if diskcheck_sccs(self.dir, self.name): 2546 scb = get_DefaultSCCSBuilder() 2547 elif diskcheck_rcs(self.dir, self.name): 2548 scb = get_DefaultRCSBuilder() 2549 else: 2550 scb = None 2551 if scb is not None: 2552 try: 2553 b = self.builder 2554 except AttributeError: 2555 b = None 2556 if b is None: 2557 self.builder_set(scb) 2558 return scb
2559
2560 - def has_src_builder(self):
2561 """Return whether this Node has a source builder or not. 2562 2563 If this Node doesn't have an explicit source code builder, this 2564 is where we figure out, on the fly, if there's a transparent 2565 source code builder for it. 2566 2567 Note that if we found a source builder, we also set the 2568 self.builder attribute, so that all of the methods that actually 2569 *build* this file don't have to do anything different. 2570 """ 2571 try: 2572 scb = self.sbuilder 2573 except AttributeError: 2574 scb = self.sbuilder = self.find_src_builder() 2575 return not scb is None
2576
2577 - def alter_targets(self):
2578 """Return any corresponding targets in a variant directory. 2579 """ 2580 if self.is_derived(): 2581 return [], None 2582 return self.fs.variant_dir_target_climb(self, self.dir, [self.name])
2583
2584 - def _rmv_existing(self):
2585 self.clear_memoized_values() 2586 e = Unlink(self, [], None) 2587 if isinstance(e, SCons.Errors.BuildError): 2588 raise e
2589 2590 # 2591 # Taskmaster interface subsystem 2592 # 2593
2594 - def make_ready(self):
2595 self.has_src_builder() 2596 self.get_binfo()
2597
2598 - def prepare(self):
2599 """Prepare for this file to be created.""" 2600 SCons.Node.Node.prepare(self) 2601 2602 if self.get_state() != SCons.Node.up_to_date: 2603 if self.exists(): 2604 if self.is_derived() and not self.precious: 2605 self._rmv_existing() 2606 else: 2607 try: 2608 self._createDir() 2609 except SCons.Errors.StopError, drive: 2610 desc = "No drive `%s' for target `%s'." % (drive, self) 2611 raise SCons.Errors.StopError, desc
2612 2613 # 2614 # 2615 # 2616
2617 - def remove(self):
2618 """Remove this file.""" 2619 if self.exists() or self.islink(): 2620 self.fs.unlink(self.path) 2621 return 1 2622 return None
2623
2624 - def do_duplicate(self, src):
2625 self._createDir() 2626 Unlink(self, None, None) 2627 e = Link(self, src, None) 2628 if isinstance(e, SCons.Errors.BuildError): 2629 desc = "Cannot duplicate `%s' in `%s': %s." % (src.path, self.dir.path, e.errstr) 2630 raise SCons.Errors.StopError, desc 2631 self.linked = 1 2632 # The Link() action may or may not have actually 2633 # created the file, depending on whether the -n 2634 # option was used or not. Delete the _exists and 2635 # _rexists attributes so they can be reevaluated. 2636 self.clear()
2637 2638 memoizer_counters.append(SCons.Memoize.CountValue('exists')) 2639
2640 - def exists(self):
2641 try: 2642 return self._memo['exists'] 2643 except KeyError: 2644 pass 2645 # Duplicate from source path if we are set up to do this. 2646 if self.duplicate and not self.is_derived() and not self.linked: 2647 src = self.srcnode() 2648 if not src is self: 2649 # At this point, src is meant to be copied in a variant directory. 2650 src = src.rfile() 2651 if src.abspath != self.abspath: 2652 if src.exists(): 2653 self.do_duplicate(src) 2654 # Can't return 1 here because the duplication might 2655 # not actually occur if the -n option is being used. 2656 else: 2657 # The source file does not exist. Make sure no old 2658 # copy remains in the variant directory. 2659 if Base.exists(self) or self.islink(): 2660 self.fs.unlink(self.path) 2661 # Return None explicitly because the Base.exists() call 2662 # above will have cached its value if the file existed. 2663 self._memo['exists'] = None 2664 return None 2665 result = Base.exists(self) 2666 self._memo['exists'] = result 2667 return result
2668 2669 # 2670 # SIGNATURE SUBSYSTEM 2671 # 2672
2673 - def get_max_drift_csig(self):
2674 """ 2675 Returns the content signature currently stored for this node 2676 if it's been unmodified longer than the max_drift value, or the 2677 max_drift value is 0. Returns None otherwise. 2678 """ 2679 old = self.get_stored_info() 2680 mtime = self.get_timestamp() 2681 2682 csig = None 2683 max_drift = self.fs.max_drift 2684 if max_drift > 0: 2685 if (time.time() - mtime) > max_drift: 2686 try: 2687 n = old.ninfo 2688 if n.timestamp and n.csig and n.timestamp == mtime: 2689 csig = n.csig 2690 except AttributeError: 2691 pass 2692 elif max_drift == 0: 2693 try: 2694 csig = old.ninfo.csig 2695 except AttributeError: 2696 pass 2697 2698 return csig
2699
2700 - def get_csig(self):
2701 """ 2702 Generate a node's content signature, the digested signature 2703 of its content. 2704 2705 node - the node 2706 cache - alternate node to use for the signature cache 2707 returns - the content signature 2708 """ 2709 ninfo = self.get_ninfo() 2710 try: 2711 return ninfo.csig 2712 except AttributeError: 2713 pass 2714 2715 csig = self.get_max_drift_csig() 2716 if csig is None: 2717 2718 try: 2719 if self.get_size() < SCons.Node.FS.File.md5_chunksize: 2720 contents = self.get_contents() 2721 else: 2722 csig = self.get_content_hash() 2723 except IOError: 2724 # This can happen if there's actually a directory on-disk, 2725 # which can be the case if they've disabled disk checks, 2726 # or if an action with a File target actually happens to 2727 # create a same-named directory by mistake. 2728 csig = '' 2729 else: 2730 if not csig: 2731 csig = SCons.Util.MD5signature(contents) 2732 2733 ninfo.csig = csig 2734 2735 return csig
2736 2737 # 2738 # DECISION SUBSYSTEM 2739 # 2740
2741 - def builder_set(self, builder):
2742 SCons.Node.Node.builder_set(self, builder) 2743 self.changed_since_last_build = self.decide_target
2744
2745 - def changed_content(self, target, prev_ni):
2746 cur_csig = self.get_csig() 2747 try: 2748 return cur_csig != prev_ni.csig 2749 except AttributeError: 2750 return 1
2751
2752 - def changed_state(self, target, prev_ni):
2753 return (self.state != SCons.Node.up_to_date)
2754
2755 - def changed_timestamp_then_content(self, target, prev_ni):
        if not self.changed_timestamp_match(target, prev_ni):
            try:
                self.get_ninfo().csig = prev_ni.csig
            except AttributeError:
                pass
            return False
        return self.changed_content(target, prev_ni)
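In other words, the timestamp-then-content decider pays for hashing only when the timestamps differ; a matching timestamp carries the previous content signature forward and reports the file as unchanged. A condensed sketch with stand-in arguments:

    def timestamp_then_content(cur_mtime, prev_mtime, prev_csig, compute_csig):
        if cur_mtime == prev_mtime:
            return False                     # timestamps match: treat as unchanged
        return compute_csig() != prev_csig   # only now compute the current hash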
2763
2764 - def changed_timestamp_newer(self, target, prev_ni):
2765 try: 2766 return self.get_timestamp() > target.get_timestamp() 2767 except AttributeError: 2768 return 1
2769
2770 - def changed_timestamp_match(self, target, prev_ni):
2771 try: 2772 return self.get_timestamp() != prev_ni.timestamp 2773 except AttributeError: 2774 return 1
2775
2776 - def decide_source(self, target, prev_ni):
2777 return target.get_build_env().decide_source(self, target, prev_ni)
2778
2779 - def decide_target(self, target, prev_ni):
2780 return target.get_build_env().decide_target(self, target, prev_ni)
2781 2782 # Initialize this Node's decider function to decide_source() because 2783 # every file is a source file until it has a Builder attached... 2784 changed_since_last_build = decide_source 2785
2786 - def is_up_to_date(self):
2787 T = 0 2788 if T: Trace('is_up_to_date(%s):' % self) 2789 if not self.exists(): 2790 if T: Trace(' not self.exists():') 2791 # The file doesn't exist locally... 2792 r = self.rfile() 2793 if r != self: 2794 # ...but there is one in a Repository... 2795 if not self.changed(r): 2796 if T: Trace(' changed(%s):' % r) 2797 # ...and it's even up-to-date... 2798 if self._local: 2799 # ...and they'd like a local copy. 2800 e = LocalCopy(self, r, None) 2801 if isinstance(e, SCons.Errors.BuildError): 2802 raise 2803 self.store_info() 2804 if T: Trace(' 1\n') 2805 return 1 2806 self.changed() 2807 if T: Trace(' None\n') 2808 return None 2809 else: 2810 r = self.changed() 2811 if T: Trace(' self.exists(): %s\n' % r) 2812 return not r
2813 2814 memoizer_counters.append(SCons.Memoize.CountValue('rfile')) 2815
2816 - def rfile(self):
2817 try: 2818 return self._memo['rfile'] 2819 except KeyError: 2820 pass 2821 result = self 2822 if not self.exists(): 2823 norm_name = _my_normcase(self.name) 2824 for dir in self.dir.get_all_rdirs(): 2825 try: node = dir.entries[norm_name] 2826 except KeyError: node = dir.file_on_disk(self.name) 2827 if node and node.exists() and \ 2828 (isinstance(node, File) or isinstance(node, Entry) \ 2829 or not node.is_derived()): 2830 result = node 2831 break 2832 self._memo['rfile'] = result 2833 return result
2834
2835 - def rstr(self):
2836 return str(self.rfile())
2837
2838 - def get_cachedir_csig(self):
2839 """ 2840 Fetch a Node's content signature for purposes of computing 2841 another Node's cachesig. 2842 2843 This is a wrapper around the normal get_csig() method that handles 2844 the somewhat obscure case of using CacheDir with the -n option. 2845 Any files that don't exist would normally be "built" by fetching 2846 them from the cache, but the normal get_csig() method will try 2847 to open up the local file, which doesn't exist because the -n 2848 option meant we didn't actually pull the file from cachedir. 2849 But since the file *does* actually exist in the cachedir, we 2850 can use its contents for the csig. 2851 """ 2852 try: 2853 return self.cachedir_csig 2854 except AttributeError: 2855 pass 2856 2857 cachedir, cachefile = self.get_build_env().get_CacheDir().cachepath(self) 2858 if not self.exists() and cachefile and os.path.exists(cachefile): 2859 self.cachedir_csig = SCons.Util.MD5filesignature(cachefile, \ 2860 SCons.Node.FS.File.md5_chunksize * 1024) 2861 else: 2862 self.cachedir_csig = self.get_csig() 2863 return self.cachedir_csig
2864
2865 - def get_cachedir_bsig(self):
        try:
            return self.cachesig
        except AttributeError:
            pass

        # Add the path to the cache signature, because multiple
        # targets built by the same action will all have the same
        # build signature, and we have to differentiate them somehow.
        children = self.children()
        sigs = map(lambda n: n.get_cachedir_csig(), children)
        executor = self.get_executor()
        sigs.append(SCons.Util.MD5signature(executor.get_contents()))
        sigs.append(self.path)
        self.cachesig = SCons.Util.MD5collect(sigs)
        return self.cachesig
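The cache signature built above combines three kinds of input: the content signatures of all children, a signature of the action contents, and the target's own path (so several targets of one action do not collide). A rough, self-contained sketch of that combination; treating MD5collect() as roughly "hash the comma-joined list" is an assumption here, not the documented SCons.Util behavior:

    import hashlib

    def cachedir_bsig(child_csigs, action_sig, target_path):
        # action_sig: an MD5 of the builder action's contents, computed elsewhere.
        pieces = list(child_csigs) + [action_sig, target_path]
        return hashlib.md5(', '.join(pieces).encode('utf-8')).hexdigest()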
2881 2882 2883 default_fs = None 2884
2885 -def get_default_fs():
2886 global default_fs 2887 if not default_fs: 2888 default_fs = FS() 2889 return default_fs
2890
2891 -class FileFinder:
2892 """ 2893 """ 2894 if SCons.Memoize.use_memoizer: 2895 __metaclass__ = SCons.Memoize.Memoized_Metaclass 2896 2897 memoizer_counters = [] 2898
2899 - def __init__(self):
2900 self._memo = {}
2901
2902 - def filedir_lookup(self, p, fd=None):
2903 """ 2904 A helper method for find_file() that looks up a directory for 2905 a file we're trying to find. This only creates the Dir Node if 2906 it exists on-disk, since if the directory doesn't exist we know 2907 we won't find any files in it... :-) 2908 2909 It would be more compact to just use this as a nested function 2910 with a default keyword argument (see the commented-out version 2911 below), but that doesn't work unless you have nested scopes, 2912 so we define it here just so this work under Python 1.5.2. 2913 """ 2914 if fd is None: 2915 fd = self.default_filedir 2916 dir, name = os.path.split(fd) 2917 drive, d = os.path.splitdrive(dir) 2918 if d in ('/', os.sep): 2919 return p.fs.get_root(drive).dir_on_disk(name) 2920 if dir: 2921 p = self.filedir_lookup(p, dir) 2922 if not p: 2923 return None 2924 norm_name = _my_normcase(name) 2925 try: 2926 node = p.entries[norm_name] 2927 except KeyError: 2928 return p.dir_on_disk(name) 2929 if isinstance(node, Dir): 2930 return node 2931 if isinstance(node, Entry): 2932 node.must_be_same(Dir) 2933 return node 2934 return None
2935
2936 - def _find_file_key(self, filename, paths, verbose=None):
2937 return (filename, paths)
2938 2939 memoizer_counters.append(SCons.Memoize.CountDict('find_file', _find_file_key)) 2940
2941 - def find_file(self, filename, paths, verbose=None):
2942 """ 2943 find_file(str, [Dir()]) -> [nodes] 2944 2945 filename - a filename to find 2946 paths - a list of directory path *nodes* to search in. Can be 2947 represented as a list, a tuple, or a callable that is 2948 called with no arguments and returns the list or tuple. 2949 2950 returns - the node created from the found file. 2951 2952 Find a node corresponding to either a derived file or a file 2953 that exists already. 2954 2955 Only the first file found is returned, and none is returned 2956 if no file is found. 2957 """ 2958 memo_key = self._find_file_key(filename, paths) 2959 try: 2960 memo_dict = self._memo['find_file'] 2961 except KeyError: 2962 memo_dict = {} 2963 self._memo['find_file'] = memo_dict 2964 else: 2965 try: 2966 return memo_dict[memo_key] 2967 except KeyError: 2968 pass 2969 2970 if verbose: 2971 if not SCons.Util.is_String(verbose): 2972 verbose = "find_file" 2973 if not callable(verbose): 2974 verbose = ' %s: ' % verbose 2975 verbose = lambda s, v=verbose: sys.stdout.write(v + s) 2976 else: 2977 verbose = lambda x: x 2978 2979 filedir, filename = os.path.split(filename) 2980 if filedir: 2981 # More compact code that we can't use until we drop 2982 # support for Python 1.5.2: 2983 # 2984 #def filedir_lookup(p, fd=filedir): 2985 # """ 2986 # A helper function that looks up a directory for a file 2987 # we're trying to find. This only creates the Dir Node 2988 # if it exists on-disk, since if the directory doesn't 2989 # exist we know we won't find any files in it... :-) 2990 # """ 2991 # dir, name = os.path.split(fd) 2992 # if dir: 2993 # p = filedir_lookup(p, dir) 2994 # if not p: 2995 # return None 2996 # norm_name = _my_normcase(name) 2997 # try: 2998 # node = p.entries[norm_name] 2999 # except KeyError: 3000 # return p.dir_on_disk(name) 3001 # if isinstance(node, Dir): 3002 # return node 3003 # if isinstance(node, Entry): 3004 # node.must_be_same(Dir) 3005 # return node 3006 # if isinstance(node, Dir) or isinstance(node, Entry): 3007 # return node 3008 # return None 3009 #paths = filter(None, map(filedir_lookup, paths)) 3010 3011 self.default_filedir = filedir 3012 paths = filter(None, map(self.filedir_lookup, paths)) 3013 3014 result = None 3015 for dir in paths: 3016 verbose("looking for '%s' in '%s' ...\n" % (filename, dir)) 3017 node, d = dir.srcdir_find_file(filename) 3018 if node: 3019 verbose("... FOUND '%s' in '%s'\n" % (filename, d)) 3020 result = node 3021 break 3022 3023 memo_dict[memo_key] = result 3024 3025 return result
3026 3027 find_file = FileFinder().find_file 3028 3029
3030 -def invalidate_node_memos(targets):
3031 """ 3032 Invalidate the memoized values of all Nodes (files or directories) 3033 that are associated with the given entries. Has been added to 3034 clear the cache of nodes affected by a direct execution of an 3035 action (e.g. Delete/Copy/Chmod). Existing Node caches become 3036 inconsistent if the action is run through Execute(). The argument 3037 `targets` can be a single Node object or filename, or a sequence 3038 of Nodes/filenames. 3039 """ 3040 from traceback import extract_stack 3041 3042 # First check if the cache really needs to be flushed. Only 3043 # actions run in the SConscript with Execute() seem to be 3044 # affected. XXX The way to check if Execute() is in the stacktrace 3045 # is a very dirty hack and should be replaced by a more sensible 3046 # solution. 3047 must_invalidate = 0 3048 tb = extract_stack() 3049 for f in tb: 3050 if f[2] == 'Execute' and f[0][-14:] == 'Environment.py': 3051 must_invalidate = 1 3052 if not must_invalidate: 3053 return 3054 3055 if not SCons.Util.is_List(targets): 3056 targets = [targets] 3057 3058 for entry in targets: 3059 # If the target is a Node object, clear the cache. If it is a 3060 # filename, look up potentially existing Node object first. 3061 try: 3062 entry.clear_memoized_values() 3063 except AttributeError: 3064 # Not a Node object, try to look up Node by filename. XXX 3065 # This creates Node objects even for those filenames which 3066 # do not correspond to an existing Node object. 3067 node = get_default_fs().Entry(entry) 3068 if node: 3069 node.clear_memoized_values()
3070