
Source Code for Module SCons.Node.FS

   1  """scons.Node.FS 
   2   
   3  File system nodes. 
   4   
   5  These Nodes represent the canonical external objects that people think 
   6  of when they think of building software: files and directories. 
   7   
   8  This holds a "default_fs" variable that should be initialized with an FS 
   9  that can be used by scripts or modules looking for the canonical default. 
  10   
  11  """ 
  12   
  13  # 
  14  # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 The SCons Foundation 
  15  # 
  16  # Permission is hereby granted, free of charge, to any person obtaining 
  17  # a copy of this software and associated documentation files (the 
  18  # "Software"), to deal in the Software without restriction, including 
  19  # without limitation the rights to use, copy, modify, merge, publish, 
  20  # distribute, sublicense, and/or sell copies of the Software, and to 
  21  # permit persons to whom the Software is furnished to do so, subject to 
  22  # the following conditions: 
  23  # 
  24  # The above copyright notice and this permission notice shall be included 
  25  # in all copies or substantial portions of the Software. 
  26  # 
  27  # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY 
  28  # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE 
  29  # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 
  30  # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE 
  31  # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 
  32  # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION 
  33  # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
  34  # 
  35   
  36  __revision__ = "src/engine/SCons/Node/FS.py 3363 2008/09/06 07:34:10 scons" 
  37   
  38  import fnmatch 
  39  from itertools import izip 
  40  import os 
  41  import os.path 
  42  import re 
  43  import shutil 
  44  import stat 
  45  import string 
  46  import sys 
  47  import time 
  48  import cStringIO 
  49   
  50  import SCons.Action 
  51  from SCons.Debug import logInstanceCreation 
  52  import SCons.Errors 
  53  import SCons.Memoize 
  54  import SCons.Node 
  55  import SCons.Node.Alias 
  56  import SCons.Subst 
  57  import SCons.Util 
  58  import SCons.Warnings 
  59   
  60  from SCons.Debug import Trace 
  61   
  62  # The max_drift value:  by default, use a cached signature value for 
  63  # any file that's been untouched for more than two days. 
  64  default_max_drift = 2*24*60*60 
  65   
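A hedged sketch of how a cut-off like this is typically consulted (the helper below is illustrative only and not part of this module): a cached content signature is trusted only when the file's modification time is older than max_drift seconds.

    import time

    def can_trust_cached_csig(file_mtime, max_drift=default_max_drift):
        # Illustrative only: treat the cached signature as usable when the
        # file has been untouched for longer than max_drift seconds.
        return (time.time() - file_mtime) > max_drift
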
  66  # 
  67  # We stringify these file system Nodes a lot.  Turning a file system Node 
  68  # into a string is non-trivial, because the final string representation 
  69  # can depend on a lot of factors:  whether it's a derived target or not, 
  70  # whether it's linked to a repository or source directory, and whether 
  71  # there's duplication going on.  The normal technique for optimizing 
  72  # calculations like this is to memoize (cache) the string value, so you 
  73  # only have to do the calculation once. 
  74  # 
  75  # A number of the above factors, however, can be set after we've already 
  76  # been asked to return a string for a Node, because a Repository() or 
  77  # VariantDir() call or the like may not occur until later in SConscript 
  78  # files.  So this variable controls whether we bother trying to save 
  79  # string values for Nodes.  The wrapper interface can set this whenever 
  80  # they're done mucking with Repository and VariantDir and the other stuff, 
  81  # to let this module know it can start returning saved string values 
  82  # for Nodes. 
  83  # 
  84  Save_Strings = None 
  85   
86 -def save_strings(val):
87 global Save_Strings 88 Save_Strings = val
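A hedged note on the intended usage described above: the wrapper interface (e.g. the main script, once all SConscript reading and the Repository()/VariantDir() calls are finished) is expected to flip this switch on, roughly:

    import SCons.Node.FS
    SCons.Node.FS.save_strings(1)   # from here on, __str__() results may be memoized
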
89 90 # 91 # Avoid unnecessary function calls by recording a Boolean value that 92 # tells us whether or not os.path.splitdrive() actually does anything 93 # on this system, and therefore whether we need to bother calling it 94 # when looking up path names in various methods below. 95 # 96 97 do_splitdrive = None 98
99 -def initialize_do_splitdrive():
100 global do_splitdrive 101 drive, path = os.path.splitdrive('X:/foo') 102 do_splitdrive = not not drive
103 104 initialize_do_splitdrive() 105 106 # 107 108 needs_normpath_check = None 109
110 -def initialize_normpath_check():
111 """ 112 Initialize the normpath_check regular expression. 113 114 This function is used by the unit tests to re-initialize the pattern 115 when testing for behavior with different values of os.sep. 116 """ 117 global needs_normpath_check 118 if os.sep == '/': 119 pattern = r'.*/|\.$|\.\.$' 120 else: 121 pattern = r'.*[/%s]|\.$|\.\.$' % re.escape(os.sep) 122 needs_normpath_check = re.compile(pattern)
  123  
  124  initialize_normpath_check() 
  125  
  126  # 
  127  # SCons.Action objects for interacting with the outside world. 
  128  # 
  129  # The Node.FS methods in this module should use these actions to 
  130  # create and/or remove files and directories; they should *not* use 
  131  # os.{link,symlink,unlink,mkdir}(), etc., directly. 
  132  # 
  133  # Using these SCons.Action objects ensures that descriptions of these 
  134  # external activities are properly displayed, that the displays are 
  135  # suppressed when the -s (silent) option is used, and (most importantly) 
  136  # the actions are disabled when the -n option is used, in which case 
  137  # there should be *no* changes to the external file system(s)... 
  138  # 
  139  
  140  if hasattr(os, 'link'): 
  153  else: 
  154      _hardlink_func = None 
  155  
  156  if hasattr(os, 'symlink'): 
  159  else: 
  160      _softlink_func = None 
  161  
162 -def _copy_func(fs, src, dest):
163 shutil.copy2(src, dest) 164 st = fs.stat(src) 165 fs.chmod(dest, stat.S_IMODE(st[stat.ST_MODE]) | stat.S_IWRITE)
166 167 168 Valid_Duplicates = ['hard-soft-copy', 'soft-hard-copy', 169 'hard-copy', 'soft-copy', 'copy'] 170 171 Link_Funcs = [] # contains the callables of the specified duplication style 172
173 -def set_duplicate(duplicate):
174 # Fill in the Link_Funcs list according to the argument 175 # (discarding those not available on the platform). 176 177 # Set up the dictionary that maps the argument names to the 178 # underlying implementations. We do this inside this function, 179 # not in the top-level module code, so that we can remap os.link 180 # and os.symlink for testing purposes. 181 link_dict = { 182 'hard' : _hardlink_func, 183 'soft' : _softlink_func, 184 'copy' : _copy_func 185 } 186 187 if not duplicate in Valid_Duplicates: 188 raise SCons.Errors.InternalError, ("The argument of set_duplicate " 189 "should be in Valid_Duplicates") 190 global Link_Funcs 191 Link_Funcs = [] 192 for func in string.split(duplicate,'-'): 193 if link_dict[func]: 194 Link_Funcs.append(link_dict[func])
195
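A short illustration, grounded in the table above, of how a duplication style selects link functions:

    set_duplicate('soft-copy')
    # Link_Funcs is now [_softlink_func, _copy_func] on platforms that have
    # os.symlink, and just [_copy_func] elsewhere; LinkFunc() below tries
    # them in that order.
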
196 -def LinkFunc(target, source, env):
  197      # Relative paths cause problems with symbolic links, so 
  198      # we use absolute paths, which may be a problem for people 
  199      # who want to move their soft-linked src-trees around. Those 
  200      # people should use the 'hard-copy' mode; soft links cannot be 
  201      # used for that, at least I have no idea how ... 
  202      src = source[0].abspath 
  203      dest = target[0].abspath 
  204      dir, file = os.path.split(dest) 
  205      if dir and not target[0].fs.isdir(dir): 
  206          os.makedirs(dir) 
  207      if not Link_Funcs: 
  208          # Set a default order of link functions. 
  209          set_duplicate('hard-soft-copy') 
  210      fs = source[0].fs 
  211      # Now link the files with the previously specified order. 
  212      for func in Link_Funcs: 
  213          try: 
  214              func(fs, src, dest) 
  215              break 
  216          except (IOError, OSError): 
  217              # An OSError indicates something happened like a permissions 
  218              # problem or an attempt to symlink across file-system 
  219              # boundaries. An IOError indicates something like the file 
  220              # not existing. In either case, keep trying additional 
  221              # functions in the list and only raise an error if the last 
  222              # one failed. 
  223              if func == Link_Funcs[-1]: 
  224                  # Exceptions from the last link method (copy) are fatal. 
  225                  raise 
  226              else: 
  227                  pass 
  228      return 0 
229 230 Link = SCons.Action.Action(LinkFunc, None)
231 -def LocalString(target, source, env):
232 return 'Local copy of %s from %s' % (target[0], source[0])
233 234 LocalCopy = SCons.Action.Action(LinkFunc, LocalString) 235
236 -def UnlinkFunc(target, source, env):
237 t = target[0] 238 t.fs.unlink(t.abspath) 239 return 0
240 241 Unlink = SCons.Action.Action(UnlinkFunc, None) 242
243 -def MkdirFunc(target, source, env):
244 t = target[0] 245 if not t.exists(): 246 t.fs.mkdir(t.abspath) 247 return 0
248 249 Mkdir = SCons.Action.Action(MkdirFunc, None, presub=None) 250 251 MkdirBuilder = None 252
253 -def get_MkdirBuilder():
254 global MkdirBuilder 255 if MkdirBuilder is None: 256 import SCons.Builder 257 import SCons.Defaults 258 # "env" will get filled in by Executor.get_build_env() 259 # calling SCons.Defaults.DefaultEnvironment() when necessary. 260 MkdirBuilder = SCons.Builder.Builder(action = Mkdir, 261 env = None, 262 explain = None, 263 is_explicit = None, 264 target_scanner = SCons.Defaults.DirEntryScanner, 265 name = "MkdirBuilder") 266 return MkdirBuilder
267
268 -class _Null:
269 pass
270 271 _null = _Null() 272 273 DefaultSCCSBuilder = None 274 DefaultRCSBuilder = None 275
276 -def get_DefaultSCCSBuilder():
277 global DefaultSCCSBuilder 278 if DefaultSCCSBuilder is None: 279 import SCons.Builder 280 # "env" will get filled in by Executor.get_build_env() 281 # calling SCons.Defaults.DefaultEnvironment() when necessary. 282 act = SCons.Action.Action('$SCCSCOM', '$SCCSCOMSTR') 283 DefaultSCCSBuilder = SCons.Builder.Builder(action = act, 284 env = None, 285 name = "DefaultSCCSBuilder") 286 return DefaultSCCSBuilder
287
288 -def get_DefaultRCSBuilder():
289 global DefaultRCSBuilder 290 if DefaultRCSBuilder is None: 291 import SCons.Builder 292 # "env" will get filled in by Executor.get_build_env() 293 # calling SCons.Defaults.DefaultEnvironment() when necessary. 294 act = SCons.Action.Action('$RCS_COCOM', '$RCS_COCOMSTR') 295 DefaultRCSBuilder = SCons.Builder.Builder(action = act, 296 env = None, 297 name = "DefaultRCSBuilder") 298 return DefaultRCSBuilder
299 300 # Cygwin's os.path.normcase pretends it's on a case-sensitive filesystem. 301 _is_cygwin = sys.platform == "cygwin" 302 if os.path.normcase("TeSt") == os.path.normpath("TeSt") and not _is_cygwin:
303 - def _my_normcase(x):
304 return x
305 else:
306 - def _my_normcase(x):
307 return string.upper(x)
308 309 310
311 -class DiskChecker:
312 - def __init__(self, type, do, ignore):
313 self.type = type 314 self.do = do 315 self.ignore = ignore 316 self.set_do()
317 - def set_do(self):
318 self.__call__ = self.do
319 - def set_ignore(self):
320 self.__call__ = self.ignore
321 - def set(self, list):
322 if self.type in list: 323 self.set_do() 324 else: 325 self.set_ignore()
326
327 -def do_diskcheck_match(node, predicate, errorfmt):
328 result = predicate() 329 try: 330 # If calling the predicate() cached a None value from stat(), 331 # remove it so it doesn't interfere with later attempts to 332 # build this Node as we walk the DAG. (This isn't a great way 333 # to do this, we're reaching into an interface that doesn't 334 # really belong to us, but it's all about performance, so 335 # for now we'll just document the dependency...) 336 if node._memo['stat'] is None: 337 del node._memo['stat'] 338 except (AttributeError, KeyError): 339 pass 340 if result: 341 raise TypeError, errorfmt % node.abspath
342
343 -def ignore_diskcheck_match(node, predicate, errorfmt):
344 pass
345
346 -def do_diskcheck_rcs(node, name):
347 try: 348 rcs_dir = node.rcs_dir 349 except AttributeError: 350 if node.entry_exists_on_disk('RCS'): 351 rcs_dir = node.Dir('RCS') 352 else: 353 rcs_dir = None 354 node.rcs_dir = rcs_dir 355 if rcs_dir: 356 return rcs_dir.entry_exists_on_disk(name+',v') 357 return None
358
359 -def ignore_diskcheck_rcs(node, name):
360 return None
361
362 -def do_diskcheck_sccs(node, name):
363 try: 364 sccs_dir = node.sccs_dir 365 except AttributeError: 366 if node.entry_exists_on_disk('SCCS'): 367 sccs_dir = node.Dir('SCCS') 368 else: 369 sccs_dir = None 370 node.sccs_dir = sccs_dir 371 if sccs_dir: 372 return sccs_dir.entry_exists_on_disk('s.'+name) 373 return None
374
375 -def ignore_diskcheck_sccs(node, name):
376 return None
377 378 diskcheck_match = DiskChecker('match', do_diskcheck_match, ignore_diskcheck_match) 379 diskcheck_rcs = DiskChecker('rcs', do_diskcheck_rcs, ignore_diskcheck_rcs) 380 diskcheck_sccs = DiskChecker('sccs', do_diskcheck_sccs, ignore_diskcheck_sccs) 381 382 diskcheckers = [ 383 diskcheck_match, 384 diskcheck_rcs, 385 diskcheck_sccs, 386 ] 387
388 -def set_diskcheck(list):
389 for dc in diskcheckers: 390 dc.set(list)
391
392 -def diskcheck_types():
393 return map(lambda dc: dc.type, diskcheckers)
394 395 396
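A hedged usage sketch: the --diskcheck command-line option is expected to feed a list of these type names into set_diskcheck(); any checker whose type is not listed is switched to its "ignore" implementation.

    print diskcheck_types()      # ['match', 'rcs', 'sccs']
    set_diskcheck(['match'])     # keep on-disk type checking, skip RCS/SCCS probes
    set_diskcheck([])            # disable all disk checks
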
397 -class EntryProxy(SCons.Util.Proxy):
398 - def __get_abspath(self):
399 entry = self.get() 400 return SCons.Subst.SpecialAttrWrapper(entry.get_abspath(), 401 entry.name + "_abspath")
402
403 - def __get_filebase(self):
404 name = self.get().name 405 return SCons.Subst.SpecialAttrWrapper(SCons.Util.splitext(name)[0], 406 name + "_filebase")
407
408 - def __get_suffix(self):
409 name = self.get().name 410 return SCons.Subst.SpecialAttrWrapper(SCons.Util.splitext(name)[1], 411 name + "_suffix")
412
413 - def __get_file(self):
414 name = self.get().name 415 return SCons.Subst.SpecialAttrWrapper(name, name + "_file")
416
417 - def __get_base_path(self):
418 """Return the file's directory and file name, with the 419 suffix stripped.""" 420 entry = self.get() 421 return SCons.Subst.SpecialAttrWrapper(SCons.Util.splitext(entry.get_path())[0], 422 entry.name + "_base")
423
424 - def __get_posix_path(self):
425 """Return the path with / as the path separator, 426 regardless of platform.""" 427 if os.sep == '/': 428 return self 429 else: 430 entry = self.get() 431 r = string.replace(entry.get_path(), os.sep, '/') 432 return SCons.Subst.SpecialAttrWrapper(r, entry.name + "_posix")
433
434 - def __get_windows_path(self):
435 """Return the path with \ as the path separator, 436 regardless of platform.""" 437 if os.sep == '\\': 438 return self 439 else: 440 entry = self.get() 441 r = string.replace(entry.get_path(), os.sep, '\\') 442 return SCons.Subst.SpecialAttrWrapper(r, entry.name + "_windows")
443
444 - def __get_srcnode(self):
445 return EntryProxy(self.get().srcnode())
446
447 - def __get_srcdir(self):
448 """Returns the directory containing the source node linked to this 449 node via VariantDir(), or the directory of this node if not linked.""" 450 return EntryProxy(self.get().srcnode().dir)
451
452 - def __get_rsrcnode(self):
453 return EntryProxy(self.get().srcnode().rfile())
454
455 - def __get_rsrcdir(self):
456 """Returns the directory containing the source node linked to this 457 node via VariantDir(), or the directory of this node if not linked.""" 458 return EntryProxy(self.get().srcnode().rfile().dir)
459
460 - def __get_dir(self):
461 return EntryProxy(self.get().dir)
462 463 dictSpecialAttrs = { "base" : __get_base_path, 464 "posix" : __get_posix_path, 465 "windows" : __get_windows_path, 466 "win32" : __get_windows_path, 467 "srcpath" : __get_srcnode, 468 "srcdir" : __get_srcdir, 469 "dir" : __get_dir, 470 "abspath" : __get_abspath, 471 "filebase" : __get_filebase, 472 "suffix" : __get_suffix, 473 "file" : __get_file, 474 "rsrcpath" : __get_rsrcnode, 475 "rsrcdir" : __get_rsrcdir, 476 } 477
478 - def __getattr__(self, name):
479 # This is how we implement the "special" attributes 480 # such as base, posix, srcdir, etc. 481 try: 482 attr_function = self.dictSpecialAttrs[name] 483 except KeyError: 484 try: 485 attr = SCons.Util.Proxy.__getattr__(self, name) 486 except AttributeError: 487 entry = self.get() 488 classname = string.split(str(entry.__class__), '.')[-1] 489 if classname[-2:] == "'>": 490 # new-style classes report their name as: 491 # "<class 'something'>" 492 # instead of the classic classes: 493 # "something" 494 classname = classname[:-2] 495 raise AttributeError, "%s instance '%s' has no attribute '%s'" % (classname, entry.name, name) 496 return attr 497 else: 498 return attr_function(self)
499
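A hedged illustration of where these special attributes surface: each node handed to the substitution machinery is wrapped in an EntryProxy (see get_subst_proxy() below), which is what makes SConscript expansions like the following work (the Command() call is only an example):

    env.Command('report.txt', 'src/main.c',
                'echo ${SOURCE.filebase}${SOURCE.suffix} -> ${TARGET.posix} > $TARGET')
    # ${SOURCE.filebase} -> 'main', ${SOURCE.suffix} -> '.c', and
    # ${TARGET.posix}    -> the target path with '/' separators on any platform.
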
500 -class Base(SCons.Node.Node):
501 """A generic class for file system entries. This class is for 502 when we don't know yet whether the entry being looked up is a file 503 or a directory. Instances of this class can morph into either 504 Dir or File objects by a later, more precise lookup. 505 506 Note: this class does not define __cmp__ and __hash__ for 507 efficiency reasons. SCons does a lot of comparing of 508 Node.FS.{Base,Entry,File,Dir} objects, so those operations must be 509 as fast as possible, which means we want to use Python's built-in 510 object identity comparisons. 511 """ 512 513 memoizer_counters = [] 514
515 - def __init__(self, name, directory, fs):
516 """Initialize a generic Node.FS.Base object. 517 518 Call the superclass initialization, take care of setting up 519 our relative and absolute paths, identify our parent 520 directory, and indicate that this node should use 521 signatures.""" 522 if __debug__: logInstanceCreation(self, 'Node.FS.Base') 523 SCons.Node.Node.__init__(self) 524 525 self.name = name 526 self.suffix = SCons.Util.splitext(name)[1] 527 self.fs = fs 528 529 assert directory, "A directory must be provided" 530 531 self.abspath = directory.entry_abspath(name) 532 self.labspath = directory.entry_labspath(name) 533 if directory.path == '.': 534 self.path = name 535 else: 536 self.path = directory.entry_path(name) 537 if directory.tpath == '.': 538 self.tpath = name 539 else: 540 self.tpath = directory.entry_tpath(name) 541 self.path_elements = directory.path_elements + [self] 542 543 self.dir = directory 544 self.cwd = None # will hold the SConscript directory for target nodes 545 self.duplicate = directory.duplicate
546
547 - def str_for_display(self):
548 return '"' + self.__str__() + '"'
549
550 - def must_be_same(self, klass):
551 """ 552 This node, which already existed, is being looked up as the 553 specified klass. Raise an exception if it isn't. 554 """ 555 if self.__class__ is klass or klass is Entry: 556 return 557 raise TypeError, "Tried to lookup %s '%s' as a %s." %\ 558 (self.__class__.__name__, self.path, klass.__name__)
559
560 - def get_dir(self):
561 return self.dir
562
563 - def get_suffix(self):
564 return self.suffix
565
566 - def rfile(self):
567 return self
568
569 - def __str__(self):
570 """A Node.FS.Base object's string representation is its path 571 name.""" 572 global Save_Strings 573 if Save_Strings: 574 return self._save_str() 575 return self._get_str()
576 577 memoizer_counters.append(SCons.Memoize.CountValue('_save_str')) 578
579 - def _save_str(self):
580 try: 581 return self._memo['_save_str'] 582 except KeyError: 583 pass 584 result = self._get_str() 585 self._memo['_save_str'] = result 586 return result
587
588 - def _get_str(self):
  589      global Save_Strings 
  590      if self.duplicate or self.is_derived(): 
  591          return self.get_path() 
  592      srcnode = self.srcnode() 
  593      if srcnode.stat() is None and not self.stat() is None: 
  594          result = self.get_path() 
  595      else: 
  596          result = srcnode.get_path() 
  597      if not Save_Strings: 
  598          # We're not at the point where we're saving the string 
  599          # representations of FS Nodes (because we haven't finished 
  600          # reading the SConscript files and need to have str() return 
  601          # things relative to them). That also means we can't yet 
  602          # cache values returned (or not returned) by stat(), since 
  603          # Python code in the SConscript files might still create 
  604          # or otherwise affect the on-disk file. So get rid of the 
  605          # values that the underlying stat() method saved. 
  606          try: del self._memo['stat'] 
  607          except KeyError: pass 
  608          if not self is srcnode: 
  609              try: del srcnode._memo['stat'] 
  610              except KeyError: pass 
  611      return result 
612 613 rstr = __str__ 614 615 memoizer_counters.append(SCons.Memoize.CountValue('stat')) 616
617 - def stat(self):
618 try: return self._memo['stat'] 619 except KeyError: pass 620 try: result = self.fs.stat(self.abspath) 621 except os.error: result = None 622 self._memo['stat'] = result 623 return result
624
625 - def exists(self):
626 return not self.stat() is None
627
628 - def rexists(self):
629 return self.rfile().exists()
630
631 - def getmtime(self):
632 st = self.stat() 633 if st: return st[stat.ST_MTIME] 634 else: return None
635
636 - def getsize(self):
637 st = self.stat() 638 if st: return st[stat.ST_SIZE] 639 else: return None
640
641 - def isdir(self):
642 st = self.stat() 643 return not st is None and stat.S_ISDIR(st[stat.ST_MODE])
644
645 - def isfile(self):
646 st = self.stat() 647 return not st is None and stat.S_ISREG(st[stat.ST_MODE])
648 649 if hasattr(os, 'symlink'): 654 else: 657
658 - def is_under(self, dir):
659 if self is dir: 660 return 1 661 else: 662 return self.dir.is_under(dir)
663
664 - def set_local(self):
665 self._local = 1
666
667 - def srcnode(self):
668 """If this node is in a build path, return the node 669 corresponding to its source file. Otherwise, return 670 ourself. 671 """ 672 srcdir_list = self.dir.srcdir_list() 673 if srcdir_list: 674 srcnode = srcdir_list[0].Entry(self.name) 675 srcnode.must_be_same(self.__class__) 676 return srcnode 677 return self
678
679 - def get_path(self, dir=None):
680 """Return path relative to the current working directory of the 681 Node.FS.Base object that owns us.""" 682 if not dir: 683 dir = self.fs.getcwd() 684 if self == dir: 685 return '.' 686 path_elems = self.path_elements 687 try: i = path_elems.index(dir) 688 except ValueError: pass 689 else: path_elems = path_elems[i+1:] 690 path_elems = map(lambda n: n.name, path_elems) 691 return string.join(path_elems, os.sep)
692
693 - def set_src_builder(self, builder):
694 """Set the source code builder for this node.""" 695 self.sbuilder = builder 696 if not self.has_builder(): 697 self.builder_set(builder)
698
699 - def src_builder(self):
700 """Fetch the source code builder for this node. 701 702 If there isn't one, we cache the source code builder specified 703 for the directory (which in turn will cache the value from its 704 parent directory, and so on up to the file system root). 705 """ 706 try: 707 scb = self.sbuilder 708 except AttributeError: 709 scb = self.dir.src_builder() 710 self.sbuilder = scb 711 return scb
712
713 - def get_abspath(self):
714 """Get the absolute path of the file.""" 715 return self.abspath
716
717 - def for_signature(self):
718 # Return just our name. Even an absolute path would not work, 719 # because that can change thanks to symlinks or remapped network 720 # paths. 721 return self.name
722
723 - def get_subst_proxy(self):
724 try: 725 return self._proxy 726 except AttributeError: 727 ret = EntryProxy(self) 728 self._proxy = ret 729 return ret
730
731 - def target_from_source(self, prefix, suffix, splitext=SCons.Util.splitext):
732 """ 733 734 Generates a target entry that corresponds to this entry (usually 735 a source file) with the specified prefix and suffix. 736 737 Note that this method can be overridden dynamically for generated 738 files that need different behavior. See Tool/swig.py for 739 an example. 740 """ 741 return self.dir.Entry(prefix + splitext(self.name)[0] + suffix)
742
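A hedged sketch of the kind of call a Builder or emitter might make with target_from_source() (the file names are illustrative):

    src = env.File('src/foo.c')
    obj = src.target_from_source(prefix='', suffix='.o')
    # obj is the Entry for 'src/foo.o', looked up relative to src's directory.
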
743 - def _Rfindalldirs_key(self, pathlist):
744 return pathlist
745 746 memoizer_counters.append(SCons.Memoize.CountDict('Rfindalldirs', _Rfindalldirs_key)) 747
748 - def Rfindalldirs(self, pathlist):
749 """ 750 Return all of the directories for a given path list, including 751 corresponding "backing" directories in any repositories. 752 753 The Node lookups are relative to this Node (typically a 754 directory), so memoizing result saves cycles from looking 755 up the same path for each target in a given directory. 756 """ 757 try: 758 memo_dict = self._memo['Rfindalldirs'] 759 except KeyError: 760 memo_dict = {} 761 self._memo['Rfindalldirs'] = memo_dict 762 else: 763 try: 764 return memo_dict[pathlist] 765 except KeyError: 766 pass 767 768 create_dir_relative_to_self = self.Dir 769 result = [] 770 for path in pathlist: 771 if isinstance(path, SCons.Node.Node): 772 result.append(path) 773 else: 774 dir = create_dir_relative_to_self(path) 775 result.extend(dir.get_all_rdirs()) 776 777 memo_dict[pathlist] = result 778 779 return result
780
781 - def RDirs(self, pathlist):
782 """Search for a list of directories in the Repository list.""" 783 cwd = self.cwd or self.fs._cwd 784 return cwd.Rfindalldirs(pathlist)
785 786 memoizer_counters.append(SCons.Memoize.CountValue('rentry')) 787
788 - def rentry(self):
789 try: 790 return self._memo['rentry'] 791 except KeyError: 792 pass 793 result = self 794 if not self.exists(): 795 norm_name = _my_normcase(self.name) 796 for dir in self.dir.get_all_rdirs(): 797 try: 798 node = dir.entries[norm_name] 799 except KeyError: 800 if dir.entry_exists_on_disk(self.name): 801 result = dir.Entry(self.name) 802 break 803 self._memo['rentry'] = result 804 return result
805
806 - def _glob1(self, pattern, ondisk=True, source=False, strings=False):
807 return []
808
809 -class Entry(Base):
810 """This is the class for generic Node.FS entries--that is, things 811 that could be a File or a Dir, but we're just not sure yet. 812 Consequently, the methods in this class really exist just to 813 transform their associated object into the right class when the 814 time comes, and then call the same-named method in the transformed 815 class.""" 816
817 - def diskcheck_match(self):
818 pass
819
820 - def disambiguate(self, must_exist=None):
821 """ 822 """ 823 if self.isdir(): 824 self.__class__ = Dir 825 self._morph() 826 elif self.isfile(): 827 self.__class__ = File 828 self._morph() 829 self.clear() 830 else: 831 # There was nothing on-disk at this location, so look in 832 # the src directory. 833 # 834 # We can't just use self.srcnode() straight away because 835 # that would create an actual Node for this file in the src 836 # directory, and there might not be one. Instead, use the 837 # dir_on_disk() method to see if there's something on-disk 838 # with that name, in which case we can go ahead and call 839 # self.srcnode() to create the right type of entry. 840 srcdir = self.dir.srcnode() 841 if srcdir != self.dir and \ 842 srcdir.entry_exists_on_disk(self.name) and \ 843 self.srcnode().isdir(): 844 self.__class__ = Dir 845 self._morph() 846 elif must_exist: 847 msg = "No such file or directory: '%s'" % self.abspath 848 raise SCons.Errors.UserError, msg 849 else: 850 self.__class__ = File 851 self._morph() 852 self.clear() 853 return self
854
855 - def rfile(self):
856 """We're a generic Entry, but the caller is actually looking for 857 a File at this point, so morph into one.""" 858 self.__class__ = File 859 self._morph() 860 self.clear() 861 return File.rfile(self)
862
863 - def scanner_key(self):
864 return self.get_suffix()
865
866 - def get_contents(self):
867 """Fetch the contents of the entry. 868 869 Since this should return the real contents from the file 870 system, we check to see into what sort of subclass we should 871 morph this Entry.""" 872 try: 873 self = self.disambiguate(must_exist=1) 874 except SCons.Errors.UserError: 875 # There was nothing on disk with which to disambiguate 876 # this entry. Leave it as an Entry, but return a null 877 # string so calls to get_contents() in emitters and the 878 # like (e.g. in qt.py) don't have to disambiguate by hand 879 # or catch the exception. 880 return '' 881 else: 882 return self.get_contents()
883
884 - def must_be_same(self, klass):
885 """Called to make sure a Node is a Dir. Since we're an 886 Entry, we can morph into one.""" 887 if not self.__class__ is klass: 888 self.__class__ = klass 889 self._morph() 890 self.clear()
891 892 # The following methods can get called before the Taskmaster has 893 # had a chance to call disambiguate() directly to see if this Entry 894 # should really be a Dir or a File. We therefore use these to call 895 # disambiguate() transparently (from our caller's point of view). 896 # 897 # Right now, this minimal set of methods has been derived by just 898 # looking at some of the methods that will obviously be called early 899 # in any of the various Taskmasters' calling sequences, and then 900 # empirically figuring out which additional methods are necessary 901 # to make various tests pass. 902
903 - def exists(self):
904 """Return if the Entry exists. Check the file system to see 905 what we should turn into first. Assume a file if there's no 906 directory.""" 907 return self.disambiguate().exists()
908
909 - def rel_path(self, other):
910 d = self.disambiguate() 911 if d.__class__ == Entry: 912 raise "rel_path() could not disambiguate File/Dir" 913 return d.rel_path(other)
914
915 - def new_ninfo(self):
916 return self.disambiguate().new_ninfo()
917
918 - def changed_since_last_build(self, target, prev_ni):
919 return self.disambiguate().changed_since_last_build(target, prev_ni)
920
921 - def _glob1(self, pattern, ondisk=True, source=False, strings=False):
922 return self.disambiguate()._glob1(pattern, ondisk, source, strings)
923 924 # This is for later so we can differentiate between Entry the class and Entry 925 # the method of the FS class. 926 _classEntry = Entry 927 928
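A hedged sketch of the morphing behaviour the Entry class implements (the paths are hypothetical):

    fs = FS('/work/project')         # hypothetical source-tree top
    n = fs.Entry('build/output')     # not yet known to be a File or a Dir
    n = n.disambiguate()             # consults the disk (and the source dir)
    print n.__class__.__name__       # 'Dir' if a directory was found there,
                                     # otherwise 'File'
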
929 -class LocalFS:
930 931 if SCons.Memoize.use_memoizer: 932 __metaclass__ = SCons.Memoize.Memoized_Metaclass 933 934 # This class implements an abstraction layer for operations involving 935 # a local file system. Essentially, this wraps any function in 936 # the os, os.path or shutil modules that we use to actually go do 937 # anything with or to the local file system. 938 # 939 # Note that there's a very good chance we'll refactor this part of 940 # the architecture in some way as we really implement the interface(s) 941 # for remote file system Nodes. For example, the right architecture 942 # might be to have this be a subclass instead of a base class. 943 # Nevertheless, we're using this as a first step in that direction. 944 # 945 # We're not using chdir() yet because the calling subclass method 946 # needs to use os.chdir() directly to avoid recursion. Will we 947 # really need this one? 948 #def chdir(self, path): 949 # return os.chdir(path)
950 - def chmod(self, path, mode):
951 return os.chmod(path, mode)
952 - def copy(self, src, dst):
953 return shutil.copy(src, dst)
954 - def copy2(self, src, dst):
955 return shutil.copy2(src, dst)
956 - def exists(self, path):
957 return os.path.exists(path)
958 - def getmtime(self, path):
959 return os.path.getmtime(path)
960 - def getsize(self, path):
961 return os.path.getsize(path)
962 - def isdir(self, path):
963 return os.path.isdir(path)
964 - def isfile(self, path):
965 return os.path.isfile(path)
968 - def lstat(self, path):
969 return os.lstat(path)
970 - def listdir(self, path):
971 return os.listdir(path)
972 - def makedirs(self, path):
973 return os.makedirs(path)
974 - def mkdir(self, path):
975 return os.mkdir(path)
976 - def rename(self, old, new):
977 return os.rename(old, new)
978 - def stat(self, path):
979 return os.stat(path)
982 - def open(self, path):
983 return open(path)
986 987 if hasattr(os, 'symlink'): 990 else: 993 994 if hasattr(os, 'readlink'): 997 else:
1000 1001 1002 #class RemoteFS: 1003 # # Skeleton for the obvious methods we might need from the 1004 # # abstraction layer for a remote filesystem. 1005 # def upload(self, local_src, remote_dst): 1006 # pass 1007 # def download(self, remote_src, local_dst): 1008 # pass 1009 1010
1011 -class FS(LocalFS):
1012 1013 memoizer_counters = [] 1014
1015 - def __init__(self, path = None):
1016 """Initialize the Node.FS subsystem. 1017 1018 The supplied path is the top of the source tree, where we 1019 expect to find the top-level build file. If no path is 1020 supplied, the current directory is the default. 1021 1022 The path argument must be a valid absolute path. 1023 """ 1024 if __debug__: logInstanceCreation(self, 'Node.FS') 1025 1026 self._memo = {} 1027 1028 self.Root = {} 1029 self.SConstruct_dir = None 1030 self.max_drift = default_max_drift 1031 1032 self.Top = None 1033 if path is None: 1034 self.pathTop = os.getcwd() 1035 else: 1036 self.pathTop = path 1037 self.defaultDrive = _my_normcase(os.path.splitdrive(self.pathTop)[0]) 1038 1039 self.Top = self.Dir(self.pathTop) 1040 self.Top.path = '.' 1041 self.Top.tpath = '.' 1042 self._cwd = self.Top 1043 1044 DirNodeInfo.fs = self 1045 FileNodeInfo.fs = self
1046
1047 - def set_SConstruct_dir(self, dir):
1048 self.SConstruct_dir = dir
1049
1050 - def get_max_drift(self):
1051 return self.max_drift
1052
1053 - def set_max_drift(self, max_drift):
1054 self.max_drift = max_drift
1055
1056 - def getcwd(self):
1057 return self._cwd
1058
1059 - def chdir(self, dir, change_os_dir=0):
1060 """Change the current working directory for lookups. 1061 If change_os_dir is true, we will also change the "real" cwd 1062 to match. 1063 """ 1064 curr=self._cwd 1065 try: 1066 if not dir is None: 1067 self._cwd = dir 1068 if change_os_dir: 1069 os.chdir(dir.abspath) 1070 except OSError: 1071 self._cwd = curr 1072 raise
1073
1074 - def get_root(self, drive):
1075 """ 1076 Returns the root directory for the specified drive, creating 1077 it if necessary. 1078 """ 1079 drive = _my_normcase(drive) 1080 try: 1081 return self.Root[drive] 1082 except KeyError: 1083 root = RootDir(drive, self) 1084 self.Root[drive] = root 1085 if not drive: 1086 self.Root[self.defaultDrive] = root 1087 elif drive == self.defaultDrive: 1088 self.Root[''] = root 1089 return root
1090
1091 - def _lookup(self, p, directory, fsclass, create=1):
1092 """ 1093 The generic entry point for Node lookup with user-supplied data. 1094 1095 This translates arbitrary input into a canonical Node.FS object 1096 of the specified fsclass. The general approach for strings is 1097 to turn it into a fully normalized absolute path and then call 1098 the root directory's lookup_abs() method for the heavy lifting. 1099 1100 If the path name begins with '#', it is unconditionally 1101 interpreted relative to the top-level directory of this FS. '#' 1102 is treated as a synonym for the top-level SConstruct directory, 1103 much like '~' is treated as a synonym for the user's home 1104 directory in a UNIX shell. So both '#foo' and '#/foo' refer 1105 to the 'foo' subdirectory underneath the top-level SConstruct 1106 directory. 1107 1108 If the path name is relative, then the path is looked up relative 1109 to the specified directory, or the current directory (self._cwd, 1110 typically the SConscript directory) if the specified directory 1111 is None. 1112 """ 1113 if isinstance(p, Base): 1114 # It's already a Node.FS object. Make sure it's the right 1115 # class and return. 1116 p.must_be_same(fsclass) 1117 return p 1118 # str(p) in case it's something like a proxy object 1119 p = str(p) 1120 1121 initial_hash = (p[0:1] == '#') 1122 if initial_hash: 1123 # There was an initial '#', so we strip it and override 1124 # whatever directory they may have specified with the 1125 # top-level SConstruct directory. 1126 p = p[1:] 1127 directory = self.Top 1128 1129 if directory and not isinstance(directory, Dir): 1130 directory = self.Dir(directory) 1131 1132 if do_splitdrive: 1133 drive, p = os.path.splitdrive(p) 1134 else: 1135 drive = '' 1136 if drive and not p: 1137 # This causes a naked drive letter to be treated as a synonym 1138 # for the root directory on that drive. 1139 p = os.sep 1140 absolute = os.path.isabs(p) 1141 1142 needs_normpath = needs_normpath_check.match(p) 1143 1144 if initial_hash or not absolute: 1145 # This is a relative lookup, either to the top-level 1146 # SConstruct directory (because of the initial '#') or to 1147 # the current directory (the path name is not absolute). 1148 # Add the string to the appropriate directory lookup path, 1149 # after which the whole thing gets normalized. 1150 if not directory: 1151 directory = self._cwd 1152 if p: 1153 p = directory.labspath + '/' + p 1154 else: 1155 p = directory.labspath 1156 1157 if needs_normpath: 1158 p = os.path.normpath(p) 1159 1160 if drive or absolute: 1161 root = self.get_root(drive) 1162 else: 1163 if not directory: 1164 directory = self._cwd 1165 root = directory.root 1166 1167 if os.sep != '/': 1168 p = string.replace(p, os.sep, '/') 1169 return root._lookup_abs(p, fsclass, create)
1170
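A hedged illustration of the top-relative '#' convention documented in _lookup(), as it typically appears in SConscript files:

    env.Append(CPPPATH=['#include'])      # <top>/include, from any SConscript
    node_a = File('#build/out.txt')
    node_b = File('#/build/out.txt')
    assert node_a is node_b               # '#foo' and '#/foo' name the same node
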
1171 - def Entry(self, name, directory = None, create = 1):
1172 """Lookup or create a generic Entry node with the specified name. 1173 If the name is a relative path (begins with ./, ../, or a file 1174 name), then it is looked up relative to the supplied directory 1175 node, or to the top level directory of the FS (supplied at 1176 construction time) if no directory is supplied. 1177 """ 1178 return self._lookup(name, directory, Entry, create)
1179
1180 - def File(self, name, directory = None, create = 1):
1181 """Lookup or create a File node with the specified name. If 1182 the name is a relative path (begins with ./, ../, or a file name), 1183 then it is looked up relative to the supplied directory node, 1184 or to the top level directory of the FS (supplied at construction 1185 time) if no directory is supplied. 1186 1187 This method will raise TypeError if a directory is found at the 1188 specified path. 1189 """ 1190 return self._lookup(name, directory, File, create)
1191
1192 - def Dir(self, name, directory = None, create = True):
1193 """Lookup or create a Dir node with the specified name. If 1194 the name is a relative path (begins with ./, ../, or a file name), 1195 then it is looked up relative to the supplied directory node, 1196 or to the top level directory of the FS (supplied at construction 1197 time) if no directory is supplied. 1198 1199 This method will raise TypeError if a normal file is found at the 1200 specified path. 1201 """ 1202 return self._lookup(name, directory, Dir, create)
1203
1204 - def VariantDir(self, variant_dir, src_dir, duplicate=1):
1205 """Link the supplied variant directory to the source directory 1206 for purposes of building files.""" 1207 1208 if not isinstance(src_dir, SCons.Node.Node): 1209 src_dir = self.Dir(src_dir) 1210 if not isinstance(variant_dir, SCons.Node.Node): 1211 variant_dir = self.Dir(variant_dir) 1212 if src_dir.is_under(variant_dir): 1213 raise SCons.Errors.UserError, "Source directory cannot be under variant directory." 1214 if variant_dir.srcdir: 1215 if variant_dir.srcdir == src_dir: 1216 return # We already did this. 1217 raise SCons.Errors.UserError, "'%s' already has a source directory: '%s'."%(variant_dir, variant_dir.srcdir) 1218 variant_dir.link(src_dir, duplicate)
1219
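A hedged SConstruct-level example of the linkage VariantDir() establishes (paths are illustrative):

    VariantDir('build', 'src', duplicate=0)
    SConscript('build/SConscript')   # actually read from src/SConscript;
                                     # targets are placed under build/
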
1220 - def Repository(self, *dirs):
1221 """Specify Repository directories to search.""" 1222 for d in dirs: 1223 if not isinstance(d, SCons.Node.Node): 1224 d = self.Dir(d) 1225 self.Top.addRepository(d)
1226
1227 - def variant_dir_target_climb(self, orig, dir, tail):
1228 """Create targets in corresponding variant directories 1229 1230 Climb the directory tree, and look up path names 1231 relative to any linked variant directories we find. 1232 1233 Even though this loops and walks up the tree, we don't memoize 1234 the return value because this is really only used to process 1235 the command-line targets. 1236 """ 1237 targets = [] 1238 message = None 1239 fmt = "building associated VariantDir targets: %s" 1240 start_dir = dir 1241 while dir: 1242 for bd in dir.variant_dirs: 1243 if start_dir.is_under(bd): 1244 # If already in the build-dir location, don't reflect 1245 return [orig], fmt % str(orig) 1246 p = apply(os.path.join, [bd.path] + tail) 1247 targets.append(self.Entry(p)) 1248 tail = [dir.name] + tail 1249 dir = dir.up() 1250 if targets: 1251 message = fmt % string.join(map(str, targets)) 1252 return targets, message
1253
1254 - def Glob(self, pathname, ondisk=True, source=True, strings=False, cwd=None):
1255 """ 1256 Globs 1257 1258 This is mainly a shim layer 1259 """ 1260 if cwd is None: 1261 cwd = self.getcwd() 1262 return cwd.glob(pathname, ondisk, source, strings)
1263
1264 -class DirNodeInfo(SCons.Node.NodeInfoBase):
1265 # This should get reset by the FS initialization. 1266 current_version_id = 1 1267 1268 fs = None 1269
1270 - def str_to_node(self, s):
1271 top = self.fs.Top 1272 root = top.root 1273 if do_splitdrive: 1274 drive, s = os.path.splitdrive(s) 1275 if drive: 1276 root = self.fs.get_root(drive) 1277 if not os.path.isabs(s): 1278 s = top.labspath + '/' + s 1279 return root._lookup_abs(s, Entry)
1280
1281 -class DirBuildInfo(SCons.Node.BuildInfoBase):
1282 current_version_id = 1
1283 1284 glob_magic_check = re.compile('[*?[]') 1285
1286 -def has_glob_magic(s):
1287 return glob_magic_check.search(s) is not None
1288
1289 -class Dir(Base):
1290 """A class for directories in a file system. 1291 """ 1292 1293 memoizer_counters = [] 1294 1295 NodeInfo = DirNodeInfo 1296 BuildInfo = DirBuildInfo 1297
1298 - def __init__(self, name, directory, fs):
1299 if __debug__: logInstanceCreation(self, 'Node.FS.Dir') 1300 Base.__init__(self, name, directory, fs) 1301 self._morph()
1302
1303 - def _morph(self):
1304 """Turn a file system Node (either a freshly initialized directory 1305 object or a separate Entry object) into a proper directory object. 1306 1307 Set up this directory's entries and hook it into the file 1308 system tree. Specify that directories (this Node) don't use 1309 signatures for calculating whether they're current. 1310 """ 1311 1312 self.repositories = [] 1313 self.srcdir = None 1314 1315 self.entries = {} 1316 self.entries['.'] = self 1317 self.entries['..'] = self.dir 1318 self.cwd = self 1319 self.searched = 0 1320 self._sconsign = None 1321 self.variant_dirs = [] 1322 self.root = self.dir.root 1323 1324 # Don't just reset the executor, replace its action list, 1325 # because it might have some pre-or post-actions that need to 1326 # be preserved. 1327 self.builder = get_MkdirBuilder() 1328 self.get_executor().set_action_list(self.builder.action)
1329
1330 - def diskcheck_match(self):
1331 diskcheck_match(self, self.isfile, 1332 "File %s found where directory expected.")
1333
1334 - def __clearRepositoryCache(self, duplicate=None):
1335 """Called when we change the repository(ies) for a directory. 1336 This clears any cached information that is invalidated by changing 1337 the repository.""" 1338 1339 for node in self.entries.values(): 1340 if node != self.dir: 1341 if node != self and isinstance(node, Dir): 1342 node.__clearRepositoryCache(duplicate) 1343 else: 1344 node.clear() 1345 try: 1346 del node._srcreps 1347 except AttributeError: 1348 pass 1349 if duplicate != None: 1350 node.duplicate=duplicate
1351
1352 - def __resetDuplicate(self, node):
1353 if node != self: 1354 node.duplicate = node.get_dir().duplicate
1355
1356 - def Entry(self, name):
1357 """ 1358 Looks up or creates an entry node named 'name' relative to 1359 this directory. 1360 """ 1361 return self.fs.Entry(name, self)
1362
1363 - def Dir(self, name, create=True):
1364 """ 1365 Looks up or creates a directory node named 'name' relative to 1366 this directory. 1367 """ 1368 dir = self.fs.Dir(name, self, create) 1369 return dir
1370
1371 - def File(self, name):
1372 """ 1373 Looks up or creates a file node named 'name' relative to 1374 this directory. 1375 """ 1376 return self.fs.File(name, self)
1377
1378 - def _lookup_rel(self, name, klass, create=1):
1379 """ 1380 Looks up a *normalized* relative path name, relative to this 1381 directory. 1382 1383 This method is intended for use by internal lookups with 1384 already-normalized path data. For general-purpose lookups, 1385 use the Entry(), Dir() and File() methods above. 1386 1387 This method does *no* input checking and will die or give 1388 incorrect results if it's passed a non-normalized path name (e.g., 1389 a path containing '..'), an absolute path name, a top-relative 1390 ('#foo') path name, or any kind of object. 1391 """ 1392 name = self.entry_labspath(name) 1393 return self.root._lookup_abs(name, klass, create)
1394 1402
1403 - def getRepositories(self):
1404 """Returns a list of repositories for this directory. 1405 """ 1406 if self.srcdir and not self.duplicate: 1407 return self.srcdir.get_all_rdirs() + self.repositories 1408 return self.repositories
1409 1410 memoizer_counters.append(SCons.Memoize.CountValue('get_all_rdirs')) 1411
1412 - def get_all_rdirs(self):
1413 try: 1414 return self._memo['get_all_rdirs'] 1415 except KeyError: 1416 pass 1417 1418 result = [self] 1419 fname = '.' 1420 dir = self 1421 while dir: 1422 for rep in dir.getRepositories(): 1423 result.append(rep.Dir(fname)) 1424 if fname == '.': 1425 fname = dir.name 1426 else: 1427 fname = dir.name + os.sep + fname 1428 dir = dir.up() 1429 1430 self._memo['get_all_rdirs'] = result 1431 1432 return result
1433
1434 - def addRepository(self, dir):
1435 if dir != self and not dir in self.repositories: 1436 self.repositories.append(dir) 1437 dir.tpath = '.' 1438 self.__clearRepositoryCache()
1439
1440 - def up(self):
1441 return self.entries['..']
1442
1443 - def _rel_path_key(self, other):
1444 return str(other)
1445 1446 memoizer_counters.append(SCons.Memoize.CountDict('rel_path', _rel_path_key)) 1447
1448 - def rel_path(self, other):
1449 """Return a path to "other" relative to this directory. 1450 """ 1451 1452 # This complicated and expensive method, which constructs relative 1453 # paths between arbitrary Node.FS objects, is no longer used 1454 # by SCons itself. It was introduced to store dependency paths 1455 # in .sconsign files relative to the target, but that ended up 1456 # being significantly inefficient. 1457 # 1458 # We're continuing to support the method because some SConstruct 1459 # files out there started using it when it was available, and 1460 # we're all about backwards compatibility.. 1461 1462 try: 1463 memo_dict = self._memo['rel_path'] 1464 except KeyError: 1465 memo_dict = {} 1466 self._memo['rel_path'] = memo_dict 1467 else: 1468 try: 1469 return memo_dict[other] 1470 except KeyError: 1471 pass 1472 1473 if self is other: 1474 1475 result = '.' 1476 1477 elif not other in self.path_elements: 1478 1479 try: 1480 other_dir = other.get_dir() 1481 except AttributeError: 1482 result = str(other) 1483 else: 1484 if other_dir is None: 1485 result = other.name 1486 else: 1487 dir_rel_path = self.rel_path(other_dir) 1488 if dir_rel_path == '.': 1489 result = other.name 1490 else: 1491 result = dir_rel_path + os.sep + other.name 1492 1493 else: 1494 1495 i = self.path_elements.index(other) + 1 1496 1497 path_elems = ['..'] * (len(self.path_elements) - i) \ 1498 + map(lambda n: n.name, other.path_elements[i:]) 1499 1500 result = string.join(path_elems, os.sep) 1501 1502 memo_dict[other] = result 1503 1504 return result
1505
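A hedged worked example of the relative-path computation above (supported, per the comment, for backwards compatibility with SConstruct files that call it):

    d = Dir('src/lib')
    f = File('build/obj/foo.o')
    print d.rel_path(f)     # '../../build/obj/foo.o' (joined with os.sep)
    print d.rel_path(d)     # '.'
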
1506 - def get_env_scanner(self, env, kw={}):
1507 import SCons.Defaults 1508 return SCons.Defaults.DirEntryScanner
1509
1510 - def get_target_scanner(self):
1511 import SCons.Defaults 1512 return SCons.Defaults.DirEntryScanner
1513
1514 - def get_found_includes(self, env, scanner, path):
1515 """Return this directory's implicit dependencies. 1516 1517 We don't bother caching the results because the scan typically 1518 shouldn't be requested more than once (as opposed to scanning 1519 .h file contents, which can be requested as many times as the 1520 files is #included by other files). 1521 """ 1522 if not scanner: 1523 return [] 1524 # Clear cached info for this Dir. If we already visited this 1525 # directory on our walk down the tree (because we didn't know at 1526 # that point it was being used as the source for another Node) 1527 # then we may have calculated build signature before realizing 1528 # we had to scan the disk. Now that we have to, though, we need 1529 # to invalidate the old calculated signature so that any node 1530 # dependent on our directory structure gets one that includes 1531 # info about everything on disk. 1532 self.clear() 1533 return scanner(self, env, path)
1534 1535 # 1536 # Taskmaster interface subsystem 1537 # 1538
1539 - def prepare(self):
1540 pass
1541
1542 - def build(self, **kw):
1543 """A null "builder" for directories.""" 1544 global MkdirBuilder 1545 if not self.builder is MkdirBuilder: 1546 apply(SCons.Node.Node.build, [self,], kw)
1547 1548 # 1549 # 1550 # 1551
1552 - def _create(self):
1553 """Create this directory, silently and without worrying about 1554 whether the builder is the default or not.""" 1555 listDirs = [] 1556 parent = self 1557 while parent: 1558 if parent.exists(): 1559 break 1560 listDirs.append(parent) 1561 p = parent.up() 1562 if p is None: 1563 raise SCons.Errors.StopError, parent.path 1564 parent = p 1565 listDirs.reverse() 1566 for dirnode in listDirs: 1567 try: 1568 # Don't call dirnode.build(), call the base Node method 1569 # directly because we definitely *must* create this 1570 # directory. The dirnode.build() method will suppress 1571 # the build if it's the default builder. 1572 SCons.Node.Node.build(dirnode) 1573 dirnode.get_executor().nullify() 1574 # The build() action may or may not have actually 1575 # created the directory, depending on whether the -n 1576 # option was used or not. Delete the _exists and 1577 # _rexists attributes so they can be reevaluated. 1578 dirnode.clear() 1579 except OSError: 1580 pass
1581
1583 global MkdirBuilder 1584 return not self.builder is MkdirBuilder and self.has_builder()
1585
1586 - def alter_targets(self):
1587 """Return any corresponding targets in a variant directory. 1588 """ 1589 return self.fs.variant_dir_target_climb(self, self, [])
1590
1591 - def scanner_key(self):
1592 """A directory does not get scanned.""" 1593 return None
1594
1595 - def get_contents(self):
1596 """Return aggregate contents of all our children.""" 1597 contents = map(lambda n: n.get_contents(), self.children()) 1598 return string.join(contents, '')
1599
1600 - def do_duplicate(self, src):
1601 pass
1602 1603 changed_since_last_build = SCons.Node.Node.state_has_changed 1604
1605 - def is_up_to_date(self):
1606 """If any child is not up-to-date, then this directory isn't, 1607 either.""" 1608 if not self.builder is MkdirBuilder and not self.exists(): 1609 return 0 1610 up_to_date = SCons.Node.up_to_date 1611 for kid in self.children(): 1612 if kid.get_state() > up_to_date: 1613 return 0 1614 return 1
1615
1616 - def rdir(self):
1617 if not self.exists(): 1618 norm_name = _my_normcase(self.name) 1619 for dir in self.dir.get_all_rdirs(): 1620 try: node = dir.entries[norm_name] 1621 except KeyError: node = dir.dir_on_disk(self.name) 1622 if node and node.exists() and \ 1623 (isinstance(dir, Dir) or isinstance(dir, Entry)): 1624 return node 1625 return self
1626
1627 - def sconsign(self):
1628 """Return the .sconsign file info for this directory, 1629 creating it first if necessary.""" 1630 if not self._sconsign: 1631 import SCons.SConsign 1632 self._sconsign = SCons.SConsign.ForDirectory(self) 1633 return self._sconsign
1634
1635 - def srcnode(self):
1636 """Dir has a special need for srcnode()...if we 1637 have a srcdir attribute set, then that *is* our srcnode.""" 1638 if self.srcdir: 1639 return self.srcdir 1640 return Base.srcnode(self)
1641
1642 - def get_timestamp(self):
1643 """Return the latest timestamp from among our children""" 1644 stamp = 0 1645 for kid in self.children(): 1646 if kid.get_timestamp() > stamp: 1647 stamp = kid.get_timestamp() 1648 return stamp
1649
1650 - def entry_abspath(self, name):
1651 return self.abspath + os.sep + name
1652
1653 - def entry_labspath(self, name):
1654 return self.labspath + '/' + name
1655
1656 - def entry_path(self, name):
1657 return self.path + os.sep + name
1658
1659 - def entry_tpath(self, name):
1660 return self.tpath + os.sep + name
1661
1662 - def entry_exists_on_disk(self, name):
1663 try: 1664 d = self.on_disk_entries 1665 except AttributeError: 1666 d = {} 1667 try: 1668 entries = os.listdir(self.abspath) 1669 except OSError: 1670 pass 1671 else: 1672 for entry in map(_my_normcase, entries): 1673 d[entry] = 1 1674 self.on_disk_entries = d 1675 return d.has_key(_my_normcase(name))
1676 1677 memoizer_counters.append(SCons.Memoize.CountValue('srcdir_list')) 1678
1679 - def srcdir_list(self):
1680 try: 1681 return self._memo['srcdir_list'] 1682 except KeyError: 1683 pass 1684 1685 result = [] 1686 1687 dirname = '.' 1688 dir = self 1689 while dir: 1690 if dir.srcdir: 1691 result.append(dir.srcdir.Dir(dirname)) 1692 dirname = dir.name + os.sep + dirname 1693 dir = dir.up() 1694 1695 self._memo['srcdir_list'] = result 1696 1697 return result
1698
1699 - def srcdir_duplicate(self, name):
1700 for dir in self.srcdir_list(): 1701 if self.is_under(dir): 1702 # We shouldn't source from something in the build path; 1703 # variant_dir is probably under src_dir, in which case 1704 # we are reflecting. 1705 break 1706 if dir.entry_exists_on_disk(name): 1707 srcnode = dir.Entry(name).disambiguate() 1708 if self.duplicate: 1709 node = self.Entry(name).disambiguate() 1710 node.do_duplicate(srcnode) 1711 return node 1712 else: 1713 return srcnode 1714 return None
1715
1716 - def _srcdir_find_file_key(self, filename):
1717 return filename
1718 1719 memoizer_counters.append(SCons.Memoize.CountDict('srcdir_find_file', _srcdir_find_file_key)) 1720
1721 - def srcdir_find_file(self, filename):
1722 try: 1723 memo_dict = self._memo['srcdir_find_file'] 1724 except KeyError: 1725 memo_dict = {} 1726 self._memo['srcdir_find_file'] = memo_dict 1727 else: 1728 try: 1729 return memo_dict[filename] 1730 except KeyError: 1731 pass 1732 1733 def func(node): 1734 if (isinstance(node, File) or isinstance(node, Entry)) and \ 1735 (node.is_derived() or node.exists()): 1736 return node 1737 return None
1738 1739 norm_name = _my_normcase(filename) 1740 1741 for rdir in self.get_all_rdirs(): 1742 try: node = rdir.entries[norm_name] 1743 except KeyError: node = rdir.file_on_disk(filename) 1744 else: node = func(node) 1745 if node: 1746 result = (node, self) 1747 memo_dict[filename] = result 1748 return result 1749 1750 for srcdir in self.srcdir_list(): 1751 for rdir in srcdir.get_all_rdirs(): 1752 try: node = rdir.entries[norm_name] 1753 except KeyError: node = rdir.file_on_disk(filename) 1754 else: node = func(node) 1755 if node: 1756 result = (File(filename, self, self.fs), srcdir) 1757 memo_dict[filename] = result 1758 return result 1759 1760 result = (None, None) 1761 memo_dict[filename] = result 1762 return result
1763
1764 - def dir_on_disk(self, name):
1765 if self.entry_exists_on_disk(name): 1766 try: return self.Dir(name) 1767 except TypeError: pass 1768 return None
1769
1770 - def file_on_disk(self, name):
1771 if self.entry_exists_on_disk(name) or \ 1772 diskcheck_rcs(self, name) or \ 1773 diskcheck_sccs(self, name): 1774 try: return self.File(name) 1775 except TypeError: pass 1776 node = self.srcdir_duplicate(name) 1777 if isinstance(node, Dir): 1778 node = None 1779 return node
1780
1781 - def walk(self, func, arg):
1782 """ 1783 Walk this directory tree by calling the specified function 1784 for each directory in the tree. 1785 1786 This behaves like the os.path.walk() function, but for in-memory 1787 Node.FS.Dir objects. The function takes the same arguments as 1788 the functions passed to os.path.walk(): 1789 1790 func(arg, dirname, fnames) 1791 1792 Except that "dirname" will actually be the directory *Node*, 1793 not the string. The '.' and '..' entries are excluded from 1794 fnames. The fnames list may be modified in-place to filter the 1795 subdirectories visited or otherwise impose a specific order. 1796 The "arg" argument is always passed to func() and may be used 1797 in any way (or ignored, passing None is common). 1798 """ 1799 entries = self.entries 1800 names = entries.keys() 1801 names.remove('.') 1802 names.remove('..') 1803 func(arg, self, names) 1804 select_dirs = lambda n, e=entries: isinstance(e[n], Dir) 1805 for dirname in filter(select_dirs, names): 1806 entries[dirname].walk(func, arg)
1807
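A hedged usage sketch for walk(), following the calling convention described in its docstring: collect every in-memory File node under a directory.

    def collect_files(arg, dirnode, names):
        # 'names' are the keys of dirnode.entries, minus '.' and '..'
        for name in names:
            entry = dirnode.entries[name]
            if isinstance(entry, File):
                arg.append(entry)

    found = []
    some_dir.walk(collect_files, found)   # 'some_dir' is any Node.FS.Dir node
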
1808 - def glob(self, pathname, ondisk=True, source=False, strings=False):
1809 """ 1810 Returns a list of Nodes (or strings) matching a specified 1811 pathname pattern. 1812 1813 Pathname patterns follow UNIX shell semantics: * matches 1814 any-length strings of any characters, ? matches any character, 1815 and [] can enclose lists or ranges of characters. Matches do 1816 not span directory separators. 1817 1818 The matches take into account Repositories, returning local 1819 Nodes if a corresponding entry exists in a Repository (either 1820 an in-memory Node or something on disk). 1821 1822 By defafult, the glob() function matches entries that exist 1823 on-disk, in addition to in-memory Nodes. Setting the "ondisk" 1824 argument to False (or some other non-true value) causes the glob() 1825 function to only match in-memory Nodes. The default behavior is 1826 to return both the on-disk and in-memory Nodes. 1827 1828 The "source" argument, when true, specifies that corresponding 1829 source Nodes must be returned if you're globbing in a build 1830 directory (initialized with VariantDir()). The default behavior 1831 is to return Nodes local to the VariantDir(). 1832 1833 The "strings" argument, when true, returns the matches as strings, 1834 not Nodes. The strings are path names relative to this directory. 1835 1836 The underlying algorithm is adapted from the glob.glob() function 1837 in the Python library (but heavily modified), and uses fnmatch() 1838 under the covers. 1839 """ 1840 dirname, basename = os.path.split(pathname) 1841 if not dirname: 1842 return self._glob1(basename, ondisk, source, strings) 1843 if has_glob_magic(dirname): 1844 list = self.glob(dirname, ondisk, source, strings=False) 1845 else: 1846 list = [self.Dir(dirname, create=True)] 1847 result = [] 1848 for dir in list: 1849 r = dir._glob1(basename, ondisk, source, strings) 1850 if strings: 1851 r = map(lambda x, d=str(dir): os.path.join(d, x), r) 1852 result.extend(r) 1853 return result
1854
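A hedged SConscript-level sketch: the public Glob() call is what normally ends up in Dir.glob() above. The pattern and keyword values here are illustrative only:

    sources = Glob('src/*.c')                 # list of Nodes, on-disk and in-memory
    names   = Glob('src/*.c', strings=True)   # the same matches as path strings
    local   = Glob('*.c', ondisk=False)       # restrict matching to in-memory Nodes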
1855 - def _glob1(self, pattern, ondisk=True, source=False, strings=False):
1856 """ 1857 Globs for and returns a list of entry names matching a single 1858 pattern in this directory. 1859 1860 This searches any repositories and source directories for 1861 corresponding entries and returns a Node (or string) relative 1862 to the current directory if an entry is found anywhere. 1863 1864 TODO: handle pattern with no wildcard 1865 """ 1866 search_dir_list = self.get_all_rdirs() 1867 for srcdir in self.srcdir_list(): 1868 search_dir_list.extend(srcdir.get_all_rdirs()) 1869 1870 names = [] 1871 for dir in search_dir_list: 1872 # We use the .name attribute from the Node because the keys of 1873 # the dir.entries dictionary are normalized (that is, all upper 1874 # case) on case-insensitive systems like Windows. 1875 #node_names = [ v.name for k, v in dir.entries.items() if k not in ('.', '..') ] 1876 entry_names = filter(lambda n: n not in ('.', '..'), dir.entries.keys()) 1877 node_names = map(lambda n, e=dir.entries: e[n].name, entry_names) 1878 names.extend(node_names) 1879 if ondisk: 1880 try: 1881 disk_names = os.listdir(dir.abspath) 1882 except os.error: 1883 pass 1884 else: 1885 names.extend(disk_names) 1886 if not strings: 1887 # We're going to return corresponding Nodes in 1888 # the local directory, so we need to make sure 1889 # those Nodes exist. We only want to create 1890 # Nodes for the entries that will match the 1891 # specified pattern, though, which means we 1892 # need to filter the list here, even though 1893 # the overall list will also be filtered later, 1894 # after we exit this loop. 1895 if pattern[0] != '.': 1896 #disk_names = [ d for d in disk_names if d[0] != '.' ] 1897 disk_names = filter(lambda x: x[0] != '.', disk_names) 1898 disk_names = fnmatch.filter(disk_names, pattern) 1899 rep_nodes = map(dir.Entry, disk_names) 1900 #rep_nodes = [ n.disambiguate() for n in rep_nodes ] 1901 rep_nodes = map(lambda n: n.disambiguate(), rep_nodes) 1902 for node, name in izip(rep_nodes, disk_names): 1903 n = self.Entry(name) 1904 if n.__class__ != node.__class__: 1905 n.__class__ = node.__class__ 1906 n._morph() 1907 1908 names = set(names) 1909 if pattern[0] != '.': 1910 #names = [ n for n in names if n[0] != '.' ] 1911 names = filter(lambda x: x[0] != '.', names) 1912 names = fnmatch.filter(names, pattern) 1913 1914 if strings: 1915 return names 1916 1917 #return [ self.entries[_my_normcase(n)] for n in names ] 1918 return map(lambda n, e=self.entries: e[_my_normcase(n)], names)
1919
1920 -class RootDir(Dir):
1921 """A class for the root directory of a file system. 1922 1923 This is the same as a Dir class, except that the path separator 1924 ('/' or '\\') is actually part of the name, so we don't need to 1925 add a separator when creating the path names of entries within 1926 this directory. 1927 """
1928 - def __init__(self, name, fs):
1929          if __debug__: logInstanceCreation(self, 'Node.FS.RootDir') 
1930          # We're going to be our own parent directory (".." entry and .dir 
1931          # attribute) so we have to set up some values so Base.__init__() 
1932          # won't gag when it calls some of our methods. 
1933          self.abspath = '' 
1934          self.labspath = '' 
1935          self.path = '' 
1936          self.tpath = '' 
1937          self.path_elements = [] 
1938          self.duplicate = 0 
1939          self.root = self 
1940          Base.__init__(self, name, self, fs) 
1941 
1942          # Now set our paths to what we really want them to be: the 
1943          # initial drive letter (the name) plus the directory separator, 
1944          # except for the "lookup abspath," which does not have the 
1945          # drive letter. 
1946          self.abspath = name + os.sep 
1947          self.labspath = '' 
1948          self.path = name + os.sep 
1949          self.tpath = name + os.sep 
1950          self._morph() 
1951 
1952          self._lookupDict = {} 
1953 
1954          # The // and os.sep + os.sep entries are necessary because 
1955          # os.path.normpath() seems to preserve double slashes at the 
1956          # beginning of a path (presumably for UNC path names), but 
1957          # collapses triple slashes to a single slash. 
1958          self._lookupDict[''] = self 
1959          self._lookupDict['/'] = self 
1960          self._lookupDict['//'] = self 
1961          self._lookupDict[os.sep] = self 
1962          self._lookupDict[os.sep + os.sep] = self 
1963
1964 - def must_be_same(self, klass):
1965 if klass is Dir: 1966 return 1967 Base.must_be_same(self, klass)
1968
1969 - def _lookup_abs(self, p, klass, create=1):
1970          """ 
1971          Fast (?) lookup of a *normalized* absolute path. 
1972 
1973          This method is intended for use by internal lookups with 
1974          already-normalized path data.  For general-purpose lookups, 
1975          use the FS.Entry(), FS.Dir() or FS.File() methods. 
1976 
1977          The caller is responsible for making sure we're passed a 
1978          normalized absolute path; we merely let Python's dictionary look 
1979          up and return the One True Node.FS object for the path. 
1980 
1981          If a Node for the specified "p" doesn't already exist, and 
1982          "create" is specified, the Node may be created after recursive 
1983          invocation to find or create the parent directory or directories. 
1984          """ 
1985          k = _my_normcase(p) 
1986          try: 
1987              result = self._lookupDict[k] 
1988          except KeyError: 
1989              if not create: 
1990                  raise SCons.Errors.UserError 
1991              # There is no Node for this path name, and we're allowed 
1992              # to create it. 
1993              dir_name, file_name = os.path.split(p) 
1994              dir_node = self._lookup_abs(dir_name, Dir) 
1995              result = klass(file_name, dir_node, self.fs) 
1996              self._lookupDict[k] = result 
1997              dir_node.entries[_my_normcase(file_name)] = result 
1998              dir_node.implicit = None 
1999 
2000              # Double-check on disk (as configured) that the Node we 
2001              # created matches whatever is out there in the real world. 
2002              result.diskcheck_match() 
2003          else: 
2004              # There is already a Node for this path name.  Allow it to 
2005              # complain if we were looking for an inappropriate type. 
2006              result.must_be_same(klass) 
2007          return result 
2008
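A short sketch (not part of this module) of the public lookups that funnel into _lookup_abs() above; the paths are hypothetical, and on Windows the drive letter would select a different RootDir:

    import SCons.Node.FS

    fs = SCons.Node.FS.get_default_fs()
    f = fs.File('/tmp/build/foo.o')    # absolute path ends up in RootDir._lookup_abs()
    d = fs.Dir('/tmp/build')
    # f.dir and d refer to the same "One True Node" for that directory.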
2009 - def __str__(self):
2010 return self.abspath
2011
2012 - def entry_abspath(self, name):
2013 return self.abspath + name
2014
2015 - def entry_labspath(self, name):
2016 return '/' + name
2017
2018 - def entry_path(self, name):
2019 return self.path + name
2020
2021 - def entry_tpath(self, name):
2022 return self.tpath + name
2023
2024 - def is_under(self, dir):
2025 if self is dir: 2026 return 1 2027 else: 2028 return 0
2029
2030 - def up(self):
2031 return None
2032
2033 - def get_dir(self):
2034 return None
2035
2036 - def src_builder(self):
2037 return _null
2038
2039 -class FileNodeInfo(SCons.Node.NodeInfoBase):
2040 current_version_id = 1 2041 2042 field_list = ['csig', 'timestamp', 'size'] 2043 2044 # This should get reset by the FS initialization. 2045 fs = None 2046
2047 - def str_to_node(self, s):
2048 top = self.fs.Top 2049 root = top.root 2050 if do_splitdrive: 2051 drive, s = os.path.splitdrive(s) 2052 if drive: 2053 root = self.fs.get_root(drive) 2054 if not os.path.isabs(s): 2055 s = top.labspath + '/' + s 2056 return root._lookup_abs(s, Entry)
2057
2058 -class FileBuildInfo(SCons.Node.BuildInfoBase):
2059 current_version_id = 1 2060
2061 - def convert_to_sconsign(self):
2062 """ 2063 Converts this FileBuildInfo object for writing to a .sconsign file 2064 2065 This replaces each Node in our various dependency lists with its 2066 usual string representation: relative to the top-level SConstruct 2067 directory, or an absolute path if it's outside. 2068 """ 2069 if os.sep == '/': 2070 node_to_str = str 2071 else: 2072 def node_to_str(n): 2073 try: 2074 s = n.path 2075 except AttributeError: 2076 s = str(n) 2077 else: 2078 s = string.replace(s, os.sep, '/') 2079 return s
2080 for attr in ['bsources', 'bdepends', 'bimplicit']: 2081 try: 2082 val = getattr(self, attr) 2083 except AttributeError: 2084 pass 2085 else: 2086 setattr(self, attr, map(node_to_str, val))
2087 - def convert_from_sconsign(self, dir, name):
2088 """ 2089 Converts a newly-read FileBuildInfo object for in-SCons use 2090 2091 For normal up-to-date checking, we don't have any conversion to 2092 perform--but we're leaving this method here to make that clear. 2093 """ 2094 pass
2095 - def prepare_dependencies(self):
2096 """ 2097 Prepares a FileBuildInfo object for explaining what changed 2098 2099 The bsources, bdepends and bimplicit lists have all been 2100 stored on disk as paths relative to the top-level SConstruct 2101 directory. Convert the strings to actual Nodes (for use by the 2102 --debug=explain code and --implicit-cache). 2103 """ 2104 attrs = [ 2105 ('bsources', 'bsourcesigs'), 2106 ('bdepends', 'bdependsigs'), 2107 ('bimplicit', 'bimplicitsigs'), 2108 ] 2109 for (nattr, sattr) in attrs: 2110 try: 2111 strings = getattr(self, nattr) 2112 nodeinfos = getattr(self, sattr) 2113 except AttributeError: 2114 pass 2115 else: 2116 nodes = [] 2117 for s, ni in izip(strings, nodeinfos): 2118 if not isinstance(s, SCons.Node.Node): 2119 s = ni.str_to_node(s) 2120 nodes.append(s) 2121 setattr(self, nattr, nodes)
2122 - def format(self, names=0):
2123 result = [] 2124 bkids = self.bsources + self.bdepends + self.bimplicit 2125 bkidsigs = self.bsourcesigs + self.bdependsigs + self.bimplicitsigs 2126 for bkid, bkidsig in izip(bkids, bkidsigs): 2127 result.append(str(bkid) + ': ' + 2128 string.join(bkidsig.format(names=names), ' ')) 2129 result.append('%s [%s]' % (self.bactsig, self.bact)) 2130 return string.join(result, '\n')
2131
2132 -class File(Base):
2133 """A class for files in a file system. 2134 """ 2135 2136 memoizer_counters = [] 2137 2138 NodeInfo = FileNodeInfo 2139 BuildInfo = FileBuildInfo 2140
2141 - def diskcheck_match(self):
2142 diskcheck_match(self, self.isdir, 2143 "Directory %s found where file expected.")
2144
2145 - def __init__(self, name, directory, fs):
2146 if __debug__: logInstanceCreation(self, 'Node.FS.File') 2147 Base.__init__(self, name, directory, fs) 2148 self._morph()
2149
2150 - def Entry(self, name):
2151 """Create an entry node named 'name' relative to 2152 the SConscript directory of this file.""" 2153 cwd = self.cwd or self.fs._cwd 2154 return cwd.Entry(name)
2155
2156 - def Dir(self, name, create=True):
2157 """Create a directory node named 'name' relative to 2158 the SConscript directory of this file.""" 2159 cwd = self.cwd or self.fs._cwd 2160 return cwd.Dir(name, create)
2161
2162 - def Dirs(self, pathlist):
2163 """Create a list of directories relative to the SConscript 2164 directory of this file.""" 2165 return map(lambda p, s=self: s.Dir(p), pathlist)
2166
2167 - def File(self, name):
2168 """Create a file node named 'name' relative to 2169 the SConscript directory of this file.""" 2170 cwd = self.cwd or self.fs._cwd 2171 return cwd.File(name)
2172 2173 #def generate_build_dict(self): 2174 # """Return an appropriate dictionary of values for building 2175 # this File.""" 2176 # return {'Dir' : self.Dir, 2177 # 'File' : self.File, 2178 # 'RDirs' : self.RDirs} 2179
2180 - def _morph(self):
2181 """Turn a file system node into a File object.""" 2182 self.scanner_paths = {} 2183 if not hasattr(self, '_local'): 2184 self._local = 0 2185 2186 # If there was already a Builder set on this entry, then 2187 # we need to make sure we call the target-decider function, 2188 # not the source-decider. Reaching in and doing this by hand 2189 # is a little bogus. We'd prefer to handle this by adding 2190 # an Entry.builder_set() method that disambiguates like the 2191 # other methods, but that starts running into problems with the 2192 # fragile way we initialize Dir Nodes with their Mkdir builders, 2193 # yet still allow them to be overridden by the user. Since it's 2194 # not clear right now how to fix that, stick with what works 2195 # until it becomes clear... 2196 if self.has_builder(): 2197 self.changed_since_last_build = self.decide_target
2198
2199 - def scanner_key(self):
2200 return self.get_suffix()
2201
2202 - def get_contents(self):
2203 if not self.rexists(): 2204 return '' 2205 fname = self.rfile().abspath 2206 try: 2207 r = open(fname, "rb").read() 2208 except EnvironmentError, e: 2209 if not e.filename: 2210 e.filename = fname 2211 raise 2212 return r
2213 2214 memoizer_counters.append(SCons.Memoize.CountValue('get_size')) 2215
2216 - def get_size(self):
2217 try: 2218 return self._memo['get_size'] 2219 except KeyError: 2220 pass 2221 2222 if self.rexists(): 2223 size = self.rfile().getsize() 2224 else: 2225 size = 0 2226 2227 self._memo['get_size'] = size 2228 2229 return size
2230 2231 memoizer_counters.append(SCons.Memoize.CountValue('get_timestamp')) 2232
2233 - def get_timestamp(self):
2234 try: 2235 return self._memo['get_timestamp'] 2236 except KeyError: 2237 pass 2238 2239 if self.rexists(): 2240 timestamp = self.rfile().getmtime() 2241 else: 2242 timestamp = 0 2243 2244 self._memo['get_timestamp'] = timestamp 2245 2246 return timestamp
2247
2248 - def store_info(self):
2249          # Merge our build information into the already-stored entry. 
2250          # This accommodates "chained builds" where a file that's a target 
2251          # in one build (SConstruct file) is a source in a different build. 
2252          # See test/chained-build.py for the use case. 
2253          self.dir.sconsign().store_info(self.name, self) 
2254 2255 convert_copy_attrs = [ 2256 'bsources', 2257 'bimplicit', 2258 'bdepends', 2259 'bact', 2260 'bactsig', 2261 'ninfo', 2262 ] 2263 2264 2265 convert_sig_attrs = [ 2266 'bsourcesigs', 2267 'bimplicitsigs', 2268 'bdependsigs', 2269 ] 2270
2271 - def convert_old_entry(self, old_entry):
2272 # Convert a .sconsign entry from before the Big Signature 2273 # Refactoring, doing what we can to convert its information 2274 # to the new .sconsign entry format. 2275 # 2276 # The old format looked essentially like this: 2277 # 2278 # BuildInfo 2279 # .ninfo (NodeInfo) 2280 # .bsig 2281 # .csig 2282 # .timestamp 2283 # .size 2284 # .bsources 2285 # .bsourcesigs ("signature" list) 2286 # .bdepends 2287 # .bdependsigs ("signature" list) 2288 # .bimplicit 2289 # .bimplicitsigs ("signature" list) 2290 # .bact 2291 # .bactsig 2292 # 2293 # The new format looks like this: 2294 # 2295 # .ninfo (NodeInfo) 2296 # .bsig 2297 # .csig 2298 # .timestamp 2299 # .size 2300 # .binfo (BuildInfo) 2301 # .bsources 2302 # .bsourcesigs (NodeInfo list) 2303 # .bsig 2304 # .csig 2305 # .timestamp 2306 # .size 2307 # .bdepends 2308 # .bdependsigs (NodeInfo list) 2309 # .bsig 2310 # .csig 2311 # .timestamp 2312 # .size 2313 # .bimplicit 2314 # .bimplicitsigs (NodeInfo list) 2315 # .bsig 2316 # .csig 2317 # .timestamp 2318 # .size 2319 # .bact 2320 # .bactsig 2321 # 2322 # The basic idea of the new structure is that a NodeInfo always 2323 # holds all available information about the state of a given Node 2324 # at a certain point in time. The various .b*sigs lists can just 2325 # be a list of pointers to the .ninfo attributes of the different 2326 # dependent nodes, without any copying of information until it's 2327 # time to pickle it for writing out to a .sconsign file. 2328 # 2329 # The complicating issue is that the *old* format only stored one 2330 # "signature" per dependency, based on however the *last* build 2331 # was configured. We don't know from just looking at it whether 2332 # it was a build signature, a content signature, or a timestamp 2333 # "signature". Since we no longer use build signatures, the 2334 # best we can do is look at the length and if it's thirty two, 2335 # assume that it was (or might have been) a content signature. 2336 # If it was actually a build signature, then it will cause a 2337 # rebuild anyway when it doesn't match the new content signature, 2338 # but that's probably the best we can do. 2339 import SCons.SConsign 2340 new_entry = SCons.SConsign.SConsignEntry() 2341 new_entry.binfo = self.new_binfo() 2342 binfo = new_entry.binfo 2343 for attr in self.convert_copy_attrs: 2344 try: 2345 value = getattr(old_entry, attr) 2346 except AttributeError: 2347 pass 2348 else: 2349 setattr(binfo, attr, value) 2350 delattr(old_entry, attr) 2351 for attr in self.convert_sig_attrs: 2352 try: 2353 sig_list = getattr(old_entry, attr) 2354 except AttributeError: 2355 pass 2356 else: 2357 value = [] 2358 for sig in sig_list: 2359 ninfo = self.new_ninfo() 2360 if len(sig) == 32: 2361 ninfo.csig = sig 2362 else: 2363 ninfo.timestamp = sig 2364 value.append(ninfo) 2365 setattr(binfo, attr, value) 2366 delattr(old_entry, attr) 2367 return new_entry
2368 2369 memoizer_counters.append(SCons.Memoize.CountValue('get_stored_info')) 2370
2371 - def get_stored_info(self):
2372 try: 2373 return self._memo['get_stored_info'] 2374 except KeyError: 2375 pass 2376 2377 try: 2378 sconsign_entry = self.dir.sconsign().get_entry(self.name) 2379 except (KeyError, OSError): 2380 import SCons.SConsign 2381 sconsign_entry = SCons.SConsign.SConsignEntry() 2382 sconsign_entry.binfo = self.new_binfo() 2383 sconsign_entry.ninfo = self.new_ninfo() 2384 else: 2385 if isinstance(sconsign_entry, FileBuildInfo): 2386 # This is a .sconsign file from before the Big Signature 2387 # Refactoring; convert it as best we can. 2388 sconsign_entry = self.convert_old_entry(sconsign_entry) 2389 try: 2390 delattr(sconsign_entry.ninfo, 'bsig') 2391 except AttributeError: 2392 pass 2393 2394 self._memo['get_stored_info'] = sconsign_entry 2395 2396 return sconsign_entry
2397
2398 - def get_stored_implicit(self):
2399 binfo = self.get_stored_info().binfo 2400 binfo.prepare_dependencies() 2401 try: return binfo.bimplicit 2402 except AttributeError: return None
2403
2404 - def rel_path(self, other):
2405 return self.dir.rel_path(other)
2406
2407 - def _get_found_includes_key(self, env, scanner, path):
2408 return (id(env), id(scanner), path)
2409 2410 memoizer_counters.append(SCons.Memoize.CountDict('get_found_includes', _get_found_includes_key)) 2411
2412 - def get_found_includes(self, env, scanner, path):
2413 """Return the included implicit dependencies in this file. 2414 Cache results so we only scan the file once per path 2415 regardless of how many times this information is requested. 2416 """ 2417 memo_key = (id(env), id(scanner), path) 2418 try: 2419 memo_dict = self._memo['get_found_includes'] 2420 except KeyError: 2421 memo_dict = {} 2422 self._memo['get_found_includes'] = memo_dict 2423 else: 2424 try: 2425 return memo_dict[memo_key] 2426 except KeyError: 2427 pass 2428 2429 if scanner: 2430 result = scanner(self, env, path) 2431 result = map(lambda N: N.disambiguate(), result) 2432 else: 2433 result = [] 2434 2435 memo_dict[memo_key] = result 2436 2437 return result
2438
2439 - def _createDir(self):
2440 # ensure that the directories for this node are 2441 # created. 2442 self.dir._create()
2443
2444 - def retrieve_from_cache(self):
2445 """Try to retrieve the node's content from a cache 2446 2447 This method is called from multiple threads in a parallel build, 2448 so only do thread safe stuff here. Do thread unsafe stuff in 2449 built(). 2450 2451 Returns true iff the node was successfully retrieved. 2452 """ 2453 if self.nocache: 2454 return None 2455 if not self.is_derived(): 2456 return None 2457 return self.get_build_env().get_CacheDir().retrieve(self)
2458
2459 - def built(self):
2460 """ 2461 Called just after this node is successfully built. 2462 """ 2463 # Push this file out to cache before the superclass Node.built() 2464 # method has a chance to clear the build signature, which it 2465 # will do if this file has a source scanner. 2466 # 2467 # We have to clear the memoized values *before* we push it to 2468 # cache so that the memoization of the self.exists() return 2469 # value doesn't interfere. 2470 self.clear_memoized_values() 2471 if self.exists(): 2472 self.get_build_env().get_CacheDir().push(self) 2473 SCons.Node.Node.built(self)
2474
2475 - def visited(self):
2476 if self.exists(): 2477 self.get_build_env().get_CacheDir().push_if_forced(self) 2478 2479 ninfo = self.get_ninfo() 2480 2481 csig = self.get_max_drift_csig() 2482 if csig: 2483 ninfo.csig = csig 2484 2485 ninfo.timestamp = self.get_timestamp() 2486 ninfo.size = self.get_size() 2487 2488 if not self.has_builder(): 2489 # This is a source file, but it might have been a target file 2490 # in another build that included more of the DAG. Copy 2491 # any build information that's stored in the .sconsign file 2492 # into our binfo object so it doesn't get lost. 2493 old = self.get_stored_info() 2494 self.get_binfo().__dict__.update(old.binfo.__dict__) 2495 2496 self.store_info()
2497
2498 - def find_src_builder(self):
2499 if self.rexists(): 2500 return None 2501 scb = self.dir.src_builder() 2502 if scb is _null: 2503 if diskcheck_sccs(self.dir, self.name): 2504 scb = get_DefaultSCCSBuilder() 2505 elif diskcheck_rcs(self.dir, self.name): 2506 scb = get_DefaultRCSBuilder() 2507 else: 2508 scb = None 2509 if scb is not None: 2510 try: 2511 b = self.builder 2512 except AttributeError: 2513 b = None 2514 if b is None: 2515 self.builder_set(scb) 2516 return scb
2517
2518 - def has_src_builder(self):
2519 """Return whether this Node has a source builder or not. 2520 2521 If this Node doesn't have an explicit source code builder, this 2522 is where we figure out, on the fly, if there's a transparent 2523 source code builder for it. 2524 2525 Note that if we found a source builder, we also set the 2526 self.builder attribute, so that all of the methods that actually 2527 *build* this file don't have to do anything different. 2528 """ 2529 try: 2530 scb = self.sbuilder 2531 except AttributeError: 2532 scb = self.sbuilder = self.find_src_builder() 2533 return not scb is None
2534
2535 - def alter_targets(self):
2536 """Return any corresponding targets in a variant directory. 2537 """ 2538 if self.is_derived(): 2539 return [], None 2540 return self.fs.variant_dir_target_climb(self, self.dir, [self.name])
2541
2542 - def _rmv_existing(self):
2543 self.clear_memoized_values() 2544 e = Unlink(self, [], None) 2545 if isinstance(e, SCons.Errors.BuildError): 2546 raise e
2547 2548 # 2549 # Taskmaster interface subsystem 2550 # 2551
2552 - def make_ready(self):
2553 self.has_src_builder() 2554 self.get_binfo()
2555
2556 - def prepare(self):
2557 """Prepare for this file to be created.""" 2558 SCons.Node.Node.prepare(self) 2559 2560 if self.get_state() != SCons.Node.up_to_date: 2561 if self.exists(): 2562 if self.is_derived() and not self.precious: 2563 self._rmv_existing() 2564 else: 2565 try: 2566 self._createDir() 2567 except SCons.Errors.StopError, drive: 2568 desc = "No drive `%s' for target `%s'." % (drive, self) 2569 raise SCons.Errors.StopError, desc
2570 2571 # 2572 # 2573 # 2574
2575 - def remove(self):
2576 """Remove this file.""" 2577 if self.exists() or self.islink(): 2578 self.fs.unlink(self.path) 2579 return 1 2580 return None
2581
2582 - def do_duplicate(self, src):
2583 self._createDir() 2584 Unlink(self, None, None) 2585 e = Link(self, src, None) 2586 if isinstance(e, SCons.Errors.BuildError): 2587 desc = "Cannot duplicate `%s' in `%s': %s." % (src.path, self.dir.path, e.errstr) 2588 raise SCons.Errors.StopError, desc 2589 self.linked = 1 2590 # The Link() action may or may not have actually 2591 # created the file, depending on whether the -n 2592 # option was used or not. Delete the _exists and 2593 # _rexists attributes so they can be reevaluated. 2594 self.clear()
2595 2596 memoizer_counters.append(SCons.Memoize.CountValue('exists')) 2597
2598 - def exists(self):
2599 try: 2600 return self._memo['exists'] 2601 except KeyError: 2602 pass 2603 # Duplicate from source path if we are set up to do this. 2604 if self.duplicate and not self.is_derived() and not self.linked: 2605 src = self.srcnode() 2606 if not src is self: 2607 # At this point, src is meant to be copied in a variant directory. 2608 src = src.rfile() 2609 if src.abspath != self.abspath: 2610 if src.exists(): 2611 self.do_duplicate(src) 2612 # Can't return 1 here because the duplication might 2613 # not actually occur if the -n option is being used. 2614 else: 2615 # The source file does not exist. Make sure no old 2616 # copy remains in the variant directory. 2617 if Base.exists(self) or self.islink(): 2618 self.fs.unlink(self.path) 2619 # Return None explicitly because the Base.exists() call 2620 # above will have cached its value if the file existed. 2621 self._memo['exists'] = None 2622 return None 2623 result = Base.exists(self) 2624 self._memo['exists'] = result 2625 return result
2626 2627 # 2628 # SIGNATURE SUBSYSTEM 2629 # 2630
2631 - def get_max_drift_csig(self):
2632 """ 2633 Returns the content signature currently stored for this node 2634 if it's been unmodified longer than the max_drift value, or the 2635 max_drift value is 0. Returns None otherwise. 2636 """ 2637 old = self.get_stored_info() 2638 mtime = self.get_timestamp() 2639 2640 csig = None 2641 max_drift = self.fs.max_drift 2642 if max_drift > 0: 2643 if (time.time() - mtime) > max_drift: 2644 try: 2645 n = old.ninfo 2646 if n.timestamp and n.csig and n.timestamp == mtime: 2647 csig = n.csig 2648 except AttributeError: 2649 pass 2650 elif max_drift == 0: 2651 try: 2652 csig = old.ninfo.csig 2653 except AttributeError: 2654 pass 2655 2656 return csig
2657
2658 - def get_csig(self):
2659 """ 2660 Generate a node's content signature, the digested signature 2661 of its content. 2662 2663 node - the node 2664 cache - alternate node to use for the signature cache 2665 returns - the content signature 2666 """ 2667 ninfo = self.get_ninfo() 2668 try: 2669 return ninfo.csig 2670 except AttributeError: 2671 pass 2672 2673 csig = self.get_max_drift_csig() 2674 if csig is None: 2675 2676 try: 2677 contents = self.get_contents() 2678 except IOError: 2679 # This can happen if there's actually a directory on-disk, 2680 # which can be the case if they've disabled disk checks, 2681 # or if an action with a File target actually happens to 2682 # create a same-named directory by mistake. 2683 csig = '' 2684 else: 2685 csig = SCons.Util.MD5signature(contents) 2686 2687 ninfo.csig = csig 2688 2689 return csig
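A hedged SConstruct-level sketch of how the max_drift behavior used by get_max_drift_csig() above is usually tuned; the values are illustrative:

    SetOption('max_drift', 0)        # always trust the stored content signature
    # SetOption('max_drift', 3600)   # re-hash only files modified within the last hour

The same knob is available on the command line as --max-drift=SECONDS.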
2690 2691 # 2692 # DECISION SUBSYSTEM 2693 # 2694
2695 - def builder_set(self, builder):
2696 SCons.Node.Node.builder_set(self, builder) 2697 self.changed_since_last_build = self.decide_target
2698
2699 - def changed_content(self, target, prev_ni):
2700 cur_csig = self.get_csig() 2701 try: 2702 return cur_csig != prev_ni.csig 2703 except AttributeError: 2704 return 1
2705
2706 - def changed_state(self, target, prev_ni):
2707 return (self.state != SCons.Node.up_to_date)
2708
2709 - def changed_timestamp_then_content(self, target, prev_ni):
2710 if not self.changed_timestamp_match(target, prev_ni): 2711 try: 2712 self.get_ninfo().csig = prev_ni.csig 2713 except AttributeError: 2714 pass 2715 return False 2716 return self.changed_content(target, prev_ni)
2717
2718 - def changed_timestamp_newer(self, target, prev_ni):
2719 try: 2720 return self.get_timestamp() > target.get_timestamp() 2721 except AttributeError: 2722 return 1
2723
2724 - def changed_timestamp_match(self, target, prev_ni):
2725 try: 2726 return self.get_timestamp() != prev_ni.timestamp 2727 except AttributeError: 2728 return 1
2729
2730 - def decide_source(self, target, prev_ni):
2731 return target.get_build_env().decide_source(self, target, prev_ni)
2732
2733 - def decide_target(self, target, prev_ni):
2734 return target.get_build_env().decide_target(self, target, prev_ni)
2735 2736 # Initialize this Node's decider function to decide_source() because 2737 # every file is a source file until it has a Builder attached... 2738 changed_since_last_build = decide_source 2739
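A hedged sketch of how the decision hooks above are normally selected from an SConstruct; env.Decider() maps these names onto the changed_*() methods, roughly as indicated in the comments:

    env = Environment()
    env.Decider('MD5')                # content-based: changed_content()
    # env.Decider('timestamp-match')  # changed_timestamp_match()
    # env.Decider('MD5-timestamp')    # changed_timestamp_then_content()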
2740 - def is_up_to_date(self):
2741 T = 0 2742 if T: Trace('is_up_to_date(%s):' % self) 2743 if not self.exists(): 2744 if T: Trace(' not self.exists():') 2745 # The file doesn't exist locally... 2746 r = self.rfile() 2747 if r != self: 2748 # ...but there is one in a Repository... 2749 if not self.changed(r): 2750 if T: Trace(' changed(%s):' % r) 2751 # ...and it's even up-to-date... 2752 if self._local: 2753 # ...and they'd like a local copy. 2754 e = LocalCopy(self, r, None) 2755 if isinstance(e, SCons.Errors.BuildError): 2756 raise 2757 self.store_info() 2758 if T: Trace(' 1\n') 2759 return 1 2760 self.changed() 2761 if T: Trace(' None\n') 2762 return None 2763 else: 2764 r = self.changed() 2765 if T: Trace(' self.exists(): %s\n' % r) 2766 return not r
2767 2768 memoizer_counters.append(SCons.Memoize.CountValue('rfile')) 2769
2770 - def rfile(self):
2771 try: 2772 return self._memo['rfile'] 2773 except KeyError: 2774 pass 2775 result = self 2776 if not self.exists(): 2777 norm_name = _my_normcase(self.name) 2778 for dir in self.dir.get_all_rdirs(): 2779 try: node = dir.entries[norm_name] 2780 except KeyError: node = dir.file_on_disk(self.name) 2781 if node and node.exists() and \ 2782 (isinstance(node, File) or isinstance(node, Entry) \ 2783 or not node.is_derived()): 2784 result = node 2785 break 2786 self._memo['rfile'] = result 2787 return result
2788
2789 - def rstr(self):
2790 return str(self.rfile())
2791
2792 - def get_cachedir_csig(self):
2793 """ 2794 Fetch a Node's content signature for purposes of computing 2795 another Node's cachesig. 2796 2797 This is a wrapper around the normal get_csig() method that handles 2798 the somewhat obscure case of using CacheDir with the -n option. 2799 Any files that don't exist would normally be "built" by fetching 2800 them from the cache, but the normal get_csig() method will try 2801 to open up the local file, which doesn't exist because the -n 2802 option meant we didn't actually pull the file from cachedir. 2803 But since the file *does* actually exist in the cachedir, we 2804 can use its contents for the csig. 2805 """ 2806 try: 2807 return self.cachedir_csig 2808 except AttributeError: 2809 pass 2810 2811 cachedir, cachefile = self.get_build_env().get_CacheDir().cachepath(self) 2812 if not self.exists() and cachefile and os.path.exists(cachefile): 2813 contents = open(cachefile, 'rb').read() 2814 self.cachedir_csig = SCons.Util.MD5signature(contents) 2815 else: 2816 self.cachedir_csig = self.get_csig() 2817 return self.cachedir_csig
2818
2819 - def get_cachedir_bsig(self):
2820 try: 2821 return self.cachesig 2822 except AttributeError: 2823 pass 2824 2825 # Add the path to the cache signature, because multiple 2826 # targets built by the same action will all have the same 2827 # build signature, and we have to differentiate them somehow. 2828 children = self.children() 2829 sigs = map(lambda n: n.get_cachedir_csig(), children) 2830 executor = self.get_executor() 2831 sigs.append(SCons.Util.MD5signature(executor.get_contents())) 2832 sigs.append(self.path) 2833 self.cachesig = SCons.Util.MD5collect(sigs) 2834 return self.cachesig
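A hedged SConstruct sketch: the cachedir signatures computed above only matter once a derived-file cache is enabled (the cache path is illustrative):

    CacheDir('/var/tmp/scons-cache')

With this in place, get_cachedir_bsig() provides the key under which built files are pushed to, and later retrieved from, the shared cache directory.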
2835 2836 default_fs = None 2837
2838 -def get_default_fs():
2839 global default_fs 2840 if not default_fs: 2841 default_fs = FS() 2842 return default_fs
2843
2844 -class FileFinder:
2845 """ 2846 """ 2847 if SCons.Memoize.use_memoizer: 2848 __metaclass__ = SCons.Memoize.Memoized_Metaclass 2849 2850 memoizer_counters = [] 2851
2852 - def __init__(self):
2853 self._memo = {}
2854
2855 - def filedir_lookup(self, p, fd=None):
2856          """ 
2857          A helper method for find_file() that looks up a directory for 
2858          a file we're trying to find.  This only creates the Dir Node if 
2859          it exists on-disk, since if the directory doesn't exist we know 
2860          we won't find any files in it...  :-) 
2861 
2862          It would be more compact to just use this as a nested function 
2863          with a default keyword argument (see the commented-out version 
2864          below), but that doesn't work unless you have nested scopes, 
2865          so we define it here just so this works under Python 1.5.2. 
2866          """ 
2867          if fd is None: 
2868              fd = self.default_filedir 
2869          dir, name = os.path.split(fd) 
2870          drive, d = os.path.splitdrive(dir) 
2871          if d in ('/', os.sep): 
2872              return p.fs.get_root(drive).dir_on_disk(name) 
2873          if dir: 
2874              p = self.filedir_lookup(p, dir) 
2875              if not p: 
2876                  return None 
2877          norm_name = _my_normcase(name) 
2878          try: 
2879              node = p.entries[norm_name] 
2880          except KeyError: 
2881              return p.dir_on_disk(name) 
2882          if isinstance(node, Dir): 
2883              return node 
2884          if isinstance(node, Entry): 
2885              node.must_be_same(Dir) 
2886              return node 
2887          return None 
2888
2889 - def _find_file_key(self, filename, paths, verbose=None):
2890 return (filename, paths)
2891 2892 memoizer_counters.append(SCons.Memoize.CountDict('find_file', _find_file_key)) 2893
2894 - def find_file(self, filename, paths, verbose=None):
2895 """ 2896 find_file(str, [Dir()]) -> [nodes] 2897 2898 filename - a filename to find 2899 paths - a list of directory path *nodes* to search in. Can be 2900 represented as a list, a tuple, or a callable that is 2901 called with no arguments and returns the list or tuple. 2902 2903 returns - the node created from the found file. 2904 2905 Find a node corresponding to either a derived file or a file 2906 that exists already. 2907 2908 Only the first file found is returned, and none is returned 2909 if no file is found. 2910 """ 2911 memo_key = self._find_file_key(filename, paths) 2912 try: 2913 memo_dict = self._memo['find_file'] 2914 except KeyError: 2915 memo_dict = {} 2916 self._memo['find_file'] = memo_dict 2917 else: 2918 try: 2919 return memo_dict[memo_key] 2920 except KeyError: 2921 pass 2922 2923 if verbose: 2924 if not SCons.Util.is_String(verbose): 2925 verbose = "find_file" 2926 if not callable(verbose): 2927 verbose = ' %s: ' % verbose 2928 verbose = lambda s, v=verbose: sys.stdout.write(v + s) 2929 else: 2930 verbose = lambda x: x 2931 2932 filedir, filename = os.path.split(filename) 2933 if filedir: 2934 # More compact code that we can't use until we drop 2935 # support for Python 1.5.2: 2936 # 2937 #def filedir_lookup(p, fd=filedir): 2938 # """ 2939 # A helper function that looks up a directory for a file 2940 # we're trying to find. This only creates the Dir Node 2941 # if it exists on-disk, since if the directory doesn't 2942 # exist we know we won't find any files in it... :-) 2943 # """ 2944 # dir, name = os.path.split(fd) 2945 # if dir: 2946 # p = filedir_lookup(p, dir) 2947 # if not p: 2948 # return None 2949 # norm_name = _my_normcase(name) 2950 # try: 2951 # node = p.entries[norm_name] 2952 # except KeyError: 2953 # return p.dir_on_disk(name) 2954 # if isinstance(node, Dir): 2955 # return node 2956 # if isinstance(node, Entry): 2957 # node.must_be_same(Dir) 2958 # return node 2959 # if isinstance(node, Dir) or isinstance(node, Entry): 2960 # return node 2961 # return None 2962 #paths = filter(None, map(filedir_lookup, paths)) 2963 2964 self.default_filedir = filedir 2965 paths = filter(None, map(self.filedir_lookup, paths)) 2966 2967 result = None 2968 for dir in paths: 2969 verbose("looking for '%s' in '%s' ...\n" % (filename, dir)) 2970 node, d = dir.srcdir_find_file(filename) 2971 if node: 2972 verbose("... FOUND '%s' in '%s'\n" % (filename, d)) 2973 result = node 2974 break 2975 2976 memo_dict[memo_key] = result 2977 2978 return result
2979 2980 find_file = FileFinder().find_file 2981
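A hedged sketch of calling the module-level find_file() wrapper above from tool or SConscript code; the header name and search directories are hypothetical:

    import SCons.Node.FS

    fs = SCons.Node.FS.get_default_fs()
    search = (fs.Dir('include'), fs.Dir('src'))   # a tuple of Dir Nodes to search
    node = SCons.Node.FS.find_file('config.h', search)
    if node:
        print 'found %s' % node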