
Source Code for Module SCons.Node.FS

   1  """scons.Node.FS 
   2   
   3  File system nodes. 
   4   
   5  These Nodes represent the canonical external objects that people think 
   6  of when they think of building software: files and directories. 
   7   
   8  This holds a "default_fs" variable that should be initialized with an FS 
   9  that can be used by scripts or modules looking for the canonical default. 
  10   
  11  """ 
  12   
  13  # 
  14  # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 The SCons Foundation 
  15  # 
  16  # Permission is hereby granted, free of charge, to any person obtaining 
  17  # a copy of this software and associated documentation files (the 
  18  # "Software"), to deal in the Software without restriction, including 
  19  # without limitation the rights to use, copy, modify, merge, publish, 
  20  # distribute, sublicense, and/or sell copies of the Software, and to 
  21  # permit persons to whom the Software is furnished to do so, subject to 
  22  # the following conditions: 
  23  # 
  24  # The above copyright notice and this permission notice shall be included 
  25  # in all copies or substantial portions of the Software. 
  26  # 
  27  # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY 
  28  # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE 
  29  # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 
  30  # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE 
  31  # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 
  32  # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION 
  33  # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
  34  # 
  35   
  36  __revision__ = "src/engine/SCons/Node/FS.py 2928 2008/04/29 22:44:09 knight" 
  37   
  38  import fnmatch 
  39  from itertools import izip 
  40  import os 
  41  import os.path 
  42  import re 
  43  import shutil 
  44  import stat 
  45  import string 
  46  import sys 
  47  import time 
  48  import cStringIO 
  49   
  50  import SCons.Action 
  51  from SCons.Debug import logInstanceCreation 
  52  import SCons.Errors 
  53  import SCons.Memoize 
  54  import SCons.Node 
  55  import SCons.Node.Alias 
  56  import SCons.Subst 
  57  import SCons.Util 
  58  import SCons.Warnings 
  59   
  60  from SCons.Debug import Trace 
  61   
  62  # The max_drift value:  by default, use a cached signature value for 
  63  # any file that's been untouched for more than two days. 
  64  default_max_drift = 2*24*60*60 
  65   
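The drift window above is normally tuned from an SConscript rather than by editing this module. A minimal sketch, assuming the standard SetOption()/--max-drift interface exposed by SCons:

    # SConstruct (sketch): always recompute content signatures instead of
    # trusting cached ones for files older than the drift window.
    SetOption('max_drift', 0)

    # Command-line equivalent (standard SCons option):
    #     scons --max-drift=0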
  66  # 
  67  # We stringify these file system Nodes a lot.  Turning a file system Node 
  68  # into a string is non-trivial, because the final string representation 
  69  # can depend on a lot of factors:  whether it's a derived target or not, 
  70  # whether it's linked to a repository or source directory, and whether 
  71  # there's duplication going on.  The normal technique for optimizing 
  72  # calculations like this is to memoize (cache) the string value, so you 
  73  # only have to do the calculation once. 
  74  # 
  75  # A number of the above factors, however, can be set after we've already 
  76  # been asked to return a string for a Node, because a Repository() or 
  77  # VariantDir() call or the like may not occur until later in SConscript 
  78  # files.  So this variable controls whether we bother trying to save 
  79  # string values for Nodes.  The wrapper interface can set this whenever 
  80  # they're done mucking with Repository and VariantDir and the other stuff, 
  81  # to let this module know it can start returning saved string values 
  82  # for Nodes. 
  83  # 
  84  Save_Strings = None 
  85   
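The same defer-then-memoize idea, shown in isolation. This is an illustrative standalone sketch; PathThing and its attributes are not part of this module:

    class PathThing(object):
        """Toy example of the Save_Strings pattern described above."""
        _cache_enabled = False          # flipped once configuration is final

        def __init__(self, compute):
            self._compute = compute     # expensive string calculation
            self._saved = None

        def __str__(self):
            if self._cache_enabled:
                if self._saved is None:
                    self._saved = self._compute()
                return self._saved
            return self._compute()      # still mutable; don't cache yet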
86 -def save_strings(val):
  87      global Save_Strings
  88      Save_Strings = val
  89
  90  #
  91  # Avoid unnecessary function calls by recording a Boolean value that
  92  # tells us whether or not os.path.splitdrive() actually does anything
  93  # on this system, and therefore whether we need to bother calling it
  94  # when looking up path names in various methods below.
  95  #
  96
  97  do_splitdrive = None
  98
99 -def initialize_do_splitdrive():
 100      global do_splitdrive
 101      drive, path = os.path.splitdrive('X:/foo')
 102      do_splitdrive = not not drive
 103
 104  initialize_do_splitdrive()
 105
 106  #
 107
 108  needs_normpath_check = None
 109
110 -def initialize_normpath_check():
111 """ 112 Initialize the normpath_check regular expression. 113 114 This function is used by the unit tests to re-initialize the pattern 115 when testing for behavior with different values of os.sep. 116 """ 117 global needs_normpath_check 118 if os.sep == '/': 119 pattern = r'.*/|\.$|\.\.$' 120 else: 121 pattern = r'.*[/%s]|\.$|\.\.$' % re.escape(os.sep) 122 needs_normpath_check = re.compile(pattern)
 123
 124  initialize_normpath_check()
 125
 126  #
 127  # SCons.Action objects for interacting with the outside world.
 128  #
 129  # The Node.FS methods in this module should use these actions to
 130  # create and/or remove files and directories; they should *not* use
 131  # os.{link,symlink,unlink,mkdir}(), etc., directly.
 132  #
 133  # Using these SCons.Action objects ensures that descriptions of these
 134  # external activities are properly displayed, that the displays are
 135  # suppressed when the -s (silent) option is used, and (most importantly)
 136  # the actions are disabled when the -n option is used, in which case
 137  # there should be *no* changes to the external file system(s)...
 138  #
 139
 140  if hasattr(os, 'link'):
 153  else:
 154      _hardlink_func = None
 155
 156  if hasattr(os, 'symlink'):
 159  else:
 160      _softlink_func = None
 161
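The same pattern, wrapping a raw filesystem call in an SCons.Action.Action so that -n and -s are honored, can be applied to other external operations. A minimal sketch; the Touch helper below is illustrative and not part of this module:

    import os
    import SCons.Action

    def TouchFunc(target, source, env):
        # Create the target file if needed and update its timestamp.
        open(target[0].abspath, 'a').close()
        os.utime(target[0].abspath, None)
        return 0

    def TouchString(target, source, env):
        return 'Touch(%s)' % target[0]

    # Wrapped as an Action, the description is printed (unless -s) and the
    # function is skipped entirely under -n, just like Link/Unlink/Mkdir here.
    Touch = SCons.Action.Action(TouchFunc, TouchString)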
162 -def _copy_func(fs, src, dest):
 163      shutil.copy2(src, dest)
 164      st = fs.stat(src)
 165      fs.chmod(dest, stat.S_IMODE(st[stat.ST_MODE]) | stat.S_IWRITE)
 166
 167
 168  Valid_Duplicates = ['hard-soft-copy', 'soft-hard-copy',
 169                      'hard-copy', 'soft-copy', 'copy']
 170
 171  Link_Funcs = [] # contains the callables of the specified duplication style
 172
173 -def set_duplicate(duplicate):
 174      # Fill in the Link_Funcs list according to the argument
 175      # (discarding those not available on the platform).
 176
 177      # Set up the dictionary that maps the argument names to the
 178      # underlying implementations.  We do this inside this function,
 179      # not in the top-level module code, so that we can remap os.link
 180      # and os.symlink for testing purposes.
 181      link_dict = {
 182          'hard' : _hardlink_func,
 183          'soft' : _softlink_func,
 184          'copy' : _copy_func
 185      }
 186
 187      if not duplicate in Valid_Duplicates:
 188          raise SCons.Errors.InternalError, ("The argument of set_duplicate "
 189                                             "should be in Valid_Duplicates")
 190      global Link_Funcs
 191      Link_Funcs = []
 192      for func in string.split(duplicate,'-'):
 193          if link_dict[func]:
 194              Link_Funcs.append(link_dict[func])
195
196 -def LinkFunc(target, source, env):
 197      # Relative paths cause problems with symbolic links, so
 198      # we use absolute paths, which may be a problem for people
 199      # who want to move their soft-linked src-trees around.  Those
 200      # people should use the 'hard-copy' mode; softlinks cannot be
 201      # used for that.
 202      src = source[0].abspath
 203      dest = target[0].abspath
 204      dir, file = os.path.split(dest)
 205      if dir and not target[0].fs.isdir(dir):
 206          os.makedirs(dir)
 207      if not Link_Funcs:
 208          # Set a default order of link functions.
 209          set_duplicate('hard-soft-copy')
 210      fs = source[0].fs
 211      # Now link the files with the previously specified order.
 212      for func in Link_Funcs:
 213          try:
 214              func(fs, src, dest)
 215              break
 216          except (IOError, OSError):
 217              # An OSError indicates something happened like a permissions
 218              # problem or an attempt to symlink across file-system
 219              # boundaries.  An IOError indicates something like the file
 220              # not existing.  In either case, keep trying additional
 221              # functions in the list and only raise an error if the last
 222              # one fails.
 223              if func == Link_Funcs[-1]:
 224                  # An exception from the last link method (copy) is fatal.
 225                  raise
 226              else:
 227                  pass
 228      return 0
 229
 230  Link = SCons.Action.Action(LinkFunc, None)
231 -def LocalString(target, source, env):
232 return 'Local copy of %s from %s' % (target[0], source[0])
 233
 234  LocalCopy = SCons.Action.Action(LinkFunc, LocalString)
 235
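The duplication order used by LinkFunc is normally selected at the user level. A short SConstruct-style sketch, assuming the standard --duplicate option and its SetOption() name; the directory names are illustrative:

    # SConstruct (sketch): choose how sources are mirrored into variant
    # directories; the value must be one of Valid_Duplicates above.
    # Command-line equivalent: scons --duplicate=soft-copy
    SetOption('duplicate', 'soft-copy')
    VariantDir('build', 'src', duplicate=1)
    SConscript('build/SConscript')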
236 -def UnlinkFunc(target, source, env):
 237      t = target[0]
 238      t.fs.unlink(t.abspath)
 239      return 0
 240
 241  Unlink = SCons.Action.Action(UnlinkFunc, None)
 242
243 -def MkdirFunc(target, source, env):
 244      t = target[0]
 245      if not t.exists():
 246          t.fs.mkdir(t.abspath)
 247      return 0
 248
 249  Mkdir = SCons.Action.Action(MkdirFunc, None, presub=None)
 250
 251  MkdirBuilder = None
 252
253 -def get_MkdirBuilder():
 254      global MkdirBuilder
 255      if MkdirBuilder is None:
 256          import SCons.Builder
 257          import SCons.Defaults
 258          # "env" will get filled in by Executor.get_build_env()
 259          # calling SCons.Defaults.DefaultEnvironment() when necessary.
 260          MkdirBuilder = SCons.Builder.Builder(action = Mkdir,
 261                                               env = None,
 262                                               explain = None,
 263                                               is_explicit = None,
 264                                               target_scanner = SCons.Defaults.DirEntryScanner,
 265                                               name = "MkdirBuilder")
 266      return MkdirBuilder
267
268 -class _Null:
269 pass
270 271 _null = _Null() 272 273 DefaultSCCSBuilder = None 274 DefaultRCSBuilder = None 275
276 -def get_DefaultSCCSBuilder():
277 global DefaultSCCSBuilder 278 if DefaultSCCSBuilder is None: 279 import SCons.Builder 280 # "env" will get filled in by Executor.get_build_env() 281 # calling SCons.Defaults.DefaultEnvironment() when necessary. 282 act = SCons.Action.Action('$SCCSCOM', '$SCCSCOMSTR') 283 DefaultSCCSBuilder = SCons.Builder.Builder(action = act, 284 env = None, 285 name = "DefaultSCCSBuilder") 286 return DefaultSCCSBuilder
287
288 -def get_DefaultRCSBuilder():
289 global DefaultRCSBuilder 290 if DefaultRCSBuilder is None: 291 import SCons.Builder 292 # "env" will get filled in by Executor.get_build_env() 293 # calling SCons.Defaults.DefaultEnvironment() when necessary. 294 act = SCons.Action.Action('$RCS_COCOM', '$RCS_COCOMSTR') 295 DefaultRCSBuilder = SCons.Builder.Builder(action = act, 296 env = None, 297 name = "DefaultRCSBuilder") 298 return DefaultRCSBuilder
299 300 # Cygwin's os.path.normcase pretends it's on a case-sensitive filesystem. 301 _is_cygwin = sys.platform == "cygwin" 302 if os.path.normcase("TeSt") == os.path.normpath("TeSt") and not _is_cygwin:
303 - def _my_normcase(x):
304 return x
305 else:
306 - def _my_normcase(x):
307 return string.upper(x)
308 309 310
311 -class DiskChecker:
312 - def __init__(self, type, do, ignore):
313 self.type = type 314 self.do = do 315 self.ignore = ignore 316 self.set_do()
317 - def set_do(self):
318 self.__call__ = self.do
319 - def set_ignore(self):
320 self.__call__ = self.ignore
321 - def set(self, list):
322 if self.type in list: 323 self.set_do() 324 else: 325 self.set_ignore()
326
327 -def do_diskcheck_match(node, predicate, errorfmt):
328 result = predicate() 329 try: 330 # If calling the predicate() cached a None value from stat(), 331 # remove it so it doesn't interfere with later attempts to 332 # build this Node as we walk the DAG. (This isn't a great way 333 # to do this, we're reaching into an interface that doesn't 334 # really belong to us, but it's all about performance, so 335 # for now we'll just document the dependency...) 336 if node._memo['stat'] is None: 337 del node._memo['stat'] 338 except (AttributeError, KeyError): 339 pass 340 if result: 341 raise TypeError, errorfmt % node.abspath
342
343 -def ignore_diskcheck_match(node, predicate, errorfmt):
344 pass
345
346 -def do_diskcheck_rcs(node, name):
347 try: 348 rcs_dir = node.rcs_dir 349 except AttributeError: 350 if node.entry_exists_on_disk('RCS'): 351 rcs_dir = node.Dir('RCS') 352 else: 353 rcs_dir = None 354 node.rcs_dir = rcs_dir 355 if rcs_dir: 356 return rcs_dir.entry_exists_on_disk(name+',v') 357 return None
358
359 -def ignore_diskcheck_rcs(node, name):
360 return None
361
362 -def do_diskcheck_sccs(node, name):
363 try: 364 sccs_dir = node.sccs_dir 365 except AttributeError: 366 if node.entry_exists_on_disk('SCCS'): 367 sccs_dir = node.Dir('SCCS') 368 else: 369 sccs_dir = None 370 node.sccs_dir = sccs_dir 371 if sccs_dir: 372 return sccs_dir.entry_exists_on_disk('s.'+name) 373 return None
374
375 -def ignore_diskcheck_sccs(node, name):
376 return None
377 378 diskcheck_match = DiskChecker('match', do_diskcheck_match, ignore_diskcheck_match) 379 diskcheck_rcs = DiskChecker('rcs', do_diskcheck_rcs, ignore_diskcheck_rcs) 380 diskcheck_sccs = DiskChecker('sccs', do_diskcheck_sccs, ignore_diskcheck_sccs) 381 382 diskcheckers = [ 383 diskcheck_match, 384 diskcheck_rcs, 385 diskcheck_sccs, 386 ] 387
388 -def set_diskcheck(list):
389 for dc in diskcheckers: 390 dc.set(list)
391
392 -def diskcheck_types():
393 return map(lambda dc: dc.type, diskcheckers)
394 395 396
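The three checkers registered above correspond to the user-level disk-check controls. A small sketch, grounded in this module's own set_diskcheck() and diskcheck_types() helpers; the command-line spellings are the standard SCons --diskcheck forms and are shown as an assumption:

    # Sketch: relax the on-disk File/Dir mismatch and RCS/SCCS existence
    # checks, e.g. to speed up very large builds.
    #
    #     scons --diskcheck=none          # disable all three checks
    #     scons --diskcheck=match         # keep only the File/Dir check
    #
    import SCons.Node.FS
    SCons.Node.FS.set_diskcheck(['match'])    # programmatic equivalent (sketch)
    print SCons.Node.FS.diskcheck_types()     # -> ['match', 'rcs', 'sccs']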
397 -class EntryProxy(SCons.Util.Proxy):
398 - def __get_abspath(self):
399 entry = self.get() 400 return SCons.Subst.SpecialAttrWrapper(entry.get_abspath(), 401 entry.name + "_abspath")
402
403 - def __get_filebase(self):
404 name = self.get().name 405 return SCons.Subst.SpecialAttrWrapper(SCons.Util.splitext(name)[0], 406 name + "_filebase")
407
408 - def __get_suffix(self):
409 name = self.get().name 410 return SCons.Subst.SpecialAttrWrapper(SCons.Util.splitext(name)[1], 411 name + "_suffix")
412
413 - def __get_file(self):
414 name = self.get().name 415 return SCons.Subst.SpecialAttrWrapper(name, name + "_file")
416
417 - def __get_base_path(self):
418 """Return the file's directory and file name, with the 419 suffix stripped.""" 420 entry = self.get() 421 return SCons.Subst.SpecialAttrWrapper(SCons.Util.splitext(entry.get_path())[0], 422 entry.name + "_base")
423
424 - def __get_posix_path(self):
425 """Return the path with / as the path separator, 426 regardless of platform.""" 427 if os.sep == '/': 428 return self 429 else: 430 entry = self.get() 431 r = string.replace(entry.get_path(), os.sep, '/') 432 return SCons.Subst.SpecialAttrWrapper(r, entry.name + "_posix")
433
434 - def __get_windows_path(self):
435 """Return the path with \ as the path separator, 436 regardless of platform.""" 437 if os.sep == '\\': 438 return self 439 else: 440 entry = self.get() 441 r = string.replace(entry.get_path(), os.sep, '\\') 442 return SCons.Subst.SpecialAttrWrapper(r, entry.name + "_windows")
443
444 - def __get_srcnode(self):
445 return EntryProxy(self.get().srcnode())
446
447 - def __get_srcdir(self):
448 """Returns the directory containing the source node linked to this 449 node via VariantDir(), or the directory of this node if not linked.""" 450 return EntryProxy(self.get().srcnode().dir)
451
452 - def __get_rsrcnode(self):
453 return EntryProxy(self.get().srcnode().rfile())
454
455 - def __get_rsrcdir(self):
456 """Returns the directory containing the source node linked to this 457 node via VariantDir(), or the directory of this node if not linked.""" 458 return EntryProxy(self.get().srcnode().rfile().dir)
459
460 - def __get_dir(self):
461 return EntryProxy(self.get().dir)
462 463 dictSpecialAttrs = { "base" : __get_base_path, 464 "posix" : __get_posix_path, 465 "windows" : __get_windows_path, 466 "win32" : __get_windows_path, 467 "srcpath" : __get_srcnode, 468 "srcdir" : __get_srcdir, 469 "dir" : __get_dir, 470 "abspath" : __get_abspath, 471 "filebase" : __get_filebase, 472 "suffix" : __get_suffix, 473 "file" : __get_file, 474 "rsrcpath" : __get_rsrcnode, 475 "rsrcdir" : __get_rsrcdir, 476 } 477
478 - def __getattr__(self, name):
479 # This is how we implement the "special" attributes 480 # such as base, posix, srcdir, etc. 481 try: 482 attr_function = self.dictSpecialAttrs[name] 483 except KeyError: 484 try: 485 attr = SCons.Util.Proxy.__getattr__(self, name) 486 except AttributeError: 487 entry = self.get() 488 classname = string.split(str(entry.__class__), '.')[-1] 489 if classname[-2:] == "'>": 490 # new-style classes report their name as: 491 # "<class 'something'>" 492 # instead of the classic classes: 493 # "something" 494 classname = classname[:-2] 495 raise AttributeError, "%s instance '%s' has no attribute '%s'" % (classname, entry.name, name) 496 return attr 497 else: 498 return attr_function(self)
499
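These proxy attributes are what back the node-attribute expansions available in command strings and construction variables. A small SConscript-style sketch; the echo command line is illustrative only:

    # Sketch: the special attributes above surface as ${TARGET.xxx} and
    # ${SOURCE.xxx} expansions during command substitution.
    env = Environment()
    env.Command('out/demo.txt', 'src/demo.in',
                'echo ${SOURCE.abspath} ${SOURCE.filebase} ${SOURCE.suffix} '
                '${TARGET.dir} ${TARGET.posix} > $TARGET')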
500 -class Base(SCons.Node.Node):
501 """A generic class for file system entries. This class is for 502 when we don't know yet whether the entry being looked up is a file 503 or a directory. Instances of this class can morph into either 504 Dir or File objects by a later, more precise lookup. 505 506 Note: this class does not define __cmp__ and __hash__ for 507 efficiency reasons. SCons does a lot of comparing of 508 Node.FS.{Base,Entry,File,Dir} objects, so those operations must be 509 as fast as possible, which means we want to use Python's built-in 510 object identity comparisons. 511 """ 512 513 memoizer_counters = [] 514
515 - def __init__(self, name, directory, fs):
516 """Initialize a generic Node.FS.Base object. 517 518 Call the superclass initialization, take care of setting up 519 our relative and absolute paths, identify our parent 520 directory, and indicate that this node should use 521 signatures.""" 522 if __debug__: logInstanceCreation(self, 'Node.FS.Base') 523 SCons.Node.Node.__init__(self) 524 525 self.name = name 526 self.suffix = SCons.Util.splitext(name)[1] 527 self.fs = fs 528 529 assert directory, "A directory must be provided" 530 531 self.abspath = directory.entry_abspath(name) 532 self.labspath = directory.entry_labspath(name) 533 if directory.path == '.': 534 self.path = name 535 else: 536 self.path = directory.entry_path(name) 537 if directory.tpath == '.': 538 self.tpath = name 539 else: 540 self.tpath = directory.entry_tpath(name) 541 self.path_elements = directory.path_elements + [self] 542 543 self.dir = directory 544 self.cwd = None # will hold the SConscript directory for target nodes 545 self.duplicate = directory.duplicate
546
547 - def must_be_same(self, klass):
548 """ 549 This node, which already existed, is being looked up as the 550 specified klass. Raise an exception if it isn't. 551 """ 552 if self.__class__ is klass or klass is Entry: 553 return 554 raise TypeError, "Tried to lookup %s '%s' as a %s." %\ 555 (self.__class__.__name__, self.path, klass.__name__)
556
557 - def get_dir(self):
558 return self.dir
559
560 - def get_suffix(self):
561 return self.suffix
562
563 - def rfile(self):
564 return self
565
566 - def __str__(self):
567 """A Node.FS.Base object's string representation is its path 568 name.""" 569 global Save_Strings 570 if Save_Strings: 571 return self._save_str() 572 return self._get_str()
573 574 memoizer_counters.append(SCons.Memoize.CountValue('_save_str')) 575
576 - def _save_str(self):
577 try: 578 return self._memo['_save_str'] 579 except KeyError: 580 pass 581 result = self._get_str() 582 self._memo['_save_str'] = result 583 return result
584
585 - def _get_str(self):
 586      global Save_Strings
 587      if self.duplicate or self.is_derived():
 588          return self.get_path()
 589      srcnode = self.srcnode()
 590      if srcnode.stat() is None and not self.stat() is None:
 591          result = self.get_path()
 592      else:
 593          result = srcnode.get_path()
 594      if not Save_Strings:
 595          # We're not at the point where we're saving the string
 596          # representations of FS Nodes (because we haven't finished
 597          # reading the SConscript files and need to have str() return
 598          # things relative to them).  That also means we can't yet
 599          # cache values returned (or not returned) by stat(), since
 600          # Python code in the SConscript files might still create
 601          # or otherwise affect the on-disk file.  So get rid of the
 602          # values that the underlying stat() method saved.
 603          try: del self._memo['stat']
 604          except KeyError: pass
 605          if not self is srcnode:
 606              try: del srcnode._memo['stat']
 607              except KeyError: pass
 608      return result
609 610 rstr = __str__ 611 612 memoizer_counters.append(SCons.Memoize.CountValue('stat')) 613
614 - def stat(self):
615 try: return self._memo['stat'] 616 except KeyError: pass 617 try: result = self.fs.stat(self.abspath) 618 except os.error: result = None 619 self._memo['stat'] = result 620 return result
621
622 - def exists(self):
623 return not self.stat() is None
624
625 - def rexists(self):
626 return self.rfile().exists()
627
628 - def getmtime(self):
629 st = self.stat() 630 if st: return st[stat.ST_MTIME] 631 else: return None
632
633 - def getsize(self):
634 st = self.stat() 635 if st: return st[stat.ST_SIZE] 636 else: return None
637
638 - def isdir(self):
639 st = self.stat() 640 return not st is None and stat.S_ISDIR(st[stat.ST_MODE])
641
642 - def isfile(self):
643 st = self.stat() 644 return not st is None and stat.S_ISREG(st[stat.ST_MODE])
645 646 if hasattr(os, 'symlink'): 651 else: 654
655 - def is_under(self, dir):
656 if self is dir: 657 return 1 658 else: 659 return self.dir.is_under(dir)
660
661 - def set_local(self):
662 self._local = 1
663
664 - def srcnode(self):
665 """If this node is in a build path, return the node 666 corresponding to its source file. Otherwise, return 667 ourself. 668 """ 669 srcdir_list = self.dir.srcdir_list() 670 if srcdir_list: 671 srcnode = srcdir_list[0].Entry(self.name) 672 srcnode.must_be_same(self.__class__) 673 return srcnode 674 return self
675
676 - def get_path(self, dir=None):
677 """Return path relative to the current working directory of the 678 Node.FS.Base object that owns us.""" 679 if not dir: 680 dir = self.fs.getcwd() 681 if self == dir: 682 return '.' 683 path_elems = self.path_elements 684 try: i = path_elems.index(dir) 685 except ValueError: pass 686 else: path_elems = path_elems[i+1:] 687 path_elems = map(lambda n: n.name, path_elems) 688 return string.join(path_elems, os.sep)
689
690 - def set_src_builder(self, builder):
691 """Set the source code builder for this node.""" 692 self.sbuilder = builder 693 if not self.has_builder(): 694 self.builder_set(builder)
695
696 - def src_builder(self):
697 """Fetch the source code builder for this node. 698 699 If there isn't one, we cache the source code builder specified 700 for the directory (which in turn will cache the value from its 701 parent directory, and so on up to the file system root). 702 """ 703 try: 704 scb = self.sbuilder 705 except AttributeError: 706 scb = self.dir.src_builder() 707 self.sbuilder = scb 708 return scb
709
710 - def get_abspath(self):
711 """Get the absolute path of the file.""" 712 return self.abspath
713
714 - def for_signature(self):
715 # Return just our name. Even an absolute path would not work, 716 # because that can change thanks to symlinks or remapped network 717 # paths. 718 return self.name
719
720 - def get_subst_proxy(self):
721 try: 722 return self._proxy 723 except AttributeError: 724 ret = EntryProxy(self) 725 self._proxy = ret 726 return ret
727
728 - def target_from_source(self, prefix, suffix, splitext=SCons.Util.splitext):
729 """ 730 731 Generates a target entry that corresponds to this entry (usually 732 a source file) with the specified prefix and suffix. 733 734 Note that this method can be overridden dynamically for generated 735 files that need different behavior. See Tool/swig.py for 736 an example. 737 """ 738 return self.dir.Entry(prefix + splitext(self.name)[0] + suffix)
739
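For example, an emitter or pseudo-builder can use this helper to derive a sibling target node from each source; a hypothetical sketch (my_emitter and the '.i' suffix are illustrative, not part of SCons):

    # Sketch: derive "<name>.i" targets alongside each source node, the
    # way a SWIG-like emitter might.
    def my_emitter(target, source, env):
        extra = [src.target_from_source('', '.i') for src in source]
        return target + extra, source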
740 - def _Rfindalldirs_key(self, pathlist):
741 return pathlist
742 743 memoizer_counters.append(SCons.Memoize.CountDict('Rfindalldirs', _Rfindalldirs_key)) 744
745 - def Rfindalldirs(self, pathlist):
746 """ 747 Return all of the directories for a given path list, including 748 corresponding "backing" directories in any repositories. 749 750 The Node lookups are relative to this Node (typically a 751 directory), so memoizing result saves cycles from looking 752 up the same path for each target in a given directory. 753 """ 754 try: 755 memo_dict = self._memo['Rfindalldirs'] 756 except KeyError: 757 memo_dict = {} 758 self._memo['Rfindalldirs'] = memo_dict 759 else: 760 try: 761 return memo_dict[pathlist] 762 except KeyError: 763 pass 764 765 create_dir_relative_to_self = self.Dir 766 result = [] 767 for path in pathlist: 768 if isinstance(path, SCons.Node.Node): 769 result.append(path) 770 else: 771 dir = create_dir_relative_to_self(path) 772 result.extend(dir.get_all_rdirs()) 773 774 memo_dict[pathlist] = result 775 776 return result
777
778 - def RDirs(self, pathlist):
779 """Search for a list of directories in the Repository list.""" 780 cwd = self.cwd or self.fs._cwd 781 return cwd.Rfindalldirs(pathlist)
782 783 memoizer_counters.append(SCons.Memoize.CountValue('rentry')) 784
785 - def rentry(self):
786 try: 787 return self._memo['rentry'] 788 except KeyError: 789 pass 790 result = self 791 if not self.exists(): 792 norm_name = _my_normcase(self.name) 793 for dir in self.dir.get_all_rdirs(): 794 try: 795 node = dir.entries[norm_name] 796 except KeyError: 797 if dir.entry_exists_on_disk(self.name): 798 result = dir.Entry(self.name) 799 break 800 self._memo['rentry'] = result 801 return result
802
803 - def _glob1(self, pattern, ondisk=True, source=False, strings=False):
804 return []
805
806 -class Entry(Base):
807 """This is the class for generic Node.FS entries--that is, things 808 that could be a File or a Dir, but we're just not sure yet. 809 Consequently, the methods in this class really exist just to 810 transform their associated object into the right class when the 811 time comes, and then call the same-named method in the transformed 812 class.""" 813
814 - def diskcheck_match(self):
815 pass
816
817 - def disambiguate(self, must_exist=None):
818 """ 819 """ 820 if self.isdir(): 821 self.__class__ = Dir 822 self._morph() 823 elif self.isfile(): 824 self.__class__ = File 825 self._morph() 826 self.clear() 827 else: 828 # There was nothing on-disk at this location, so look in 829 # the src directory. 830 # 831 # We can't just use self.srcnode() straight away because 832 # that would create an actual Node for this file in the src 833 # directory, and there might not be one. Instead, use the 834 # dir_on_disk() method to see if there's something on-disk 835 # with that name, in which case we can go ahead and call 836 # self.srcnode() to create the right type of entry. 837 srcdir = self.dir.srcnode() 838 if srcdir != self.dir and \ 839 srcdir.entry_exists_on_disk(self.name) and \ 840 self.srcnode().isdir(): 841 self.__class__ = Dir 842 self._morph() 843 elif must_exist: 844 msg = "No such file or directory: '%s'" % self.abspath 845 raise SCons.Errors.UserError, msg 846 else: 847 self.__class__ = File 848 self._morph() 849 self.clear() 850 return self
851
852 - def rfile(self):
853 """We're a generic Entry, but the caller is actually looking for 854 a File at this point, so morph into one.""" 855 self.__class__ = File 856 self._morph() 857 self.clear() 858 return File.rfile(self)
859
860 - def scanner_key(self):
861 return self.get_suffix()
862
863 - def get_contents(self):
864 """Fetch the contents of the entry. 865 866 Since this should return the real contents from the file 867 system, we check to see into what sort of subclass we should 868 morph this Entry.""" 869 try: 870 self = self.disambiguate(must_exist=1) 871 except SCons.Errors.UserError: 872 # There was nothing on disk with which to disambiguate 873 # this entry. Leave it as an Entry, but return a null 874 # string so calls to get_contents() in emitters and the 875 # like (e.g. in qt.py) don't have to disambiguate by hand 876 # or catch the exception. 877 return '' 878 else: 879 return self.get_contents()
880
881 - def must_be_same(self, klass):
882 """Called to make sure a Node is a Dir. Since we're an 883 Entry, we can morph into one.""" 884 if not self.__class__ is klass: 885 self.__class__ = klass 886 self._morph() 887 self.clear
888 889 # The following methods can get called before the Taskmaster has 890 # had a chance to call disambiguate() directly to see if this Entry 891 # should really be a Dir or a File. We therefore use these to call 892 # disambiguate() transparently (from our caller's point of view). 893 # 894 # Right now, this minimal set of methods has been derived by just 895 # looking at some of the methods that will obviously be called early 896 # in any of the various Taskmasters' calling sequences, and then 897 # empirically figuring out which additional methods are necessary 898 # to make various tests pass. 899
900 - def exists(self):
901 """Return if the Entry exists. Check the file system to see 902 what we should turn into first. Assume a file if there's no 903 directory.""" 904 return self.disambiguate().exists()
905
906 - def rel_path(self, other):
907 d = self.disambiguate() 908 if d.__class__ == Entry: 909 raise "rel_path() could not disambiguate File/Dir" 910 return d.rel_path(other)
911
912 - def new_ninfo(self):
913 return self.disambiguate().new_ninfo()
914
915 - def changed_since_last_build(self, target, prev_ni):
916 return self.disambiguate().changed_since_last_build(target, prev_ni)
917
918 - def _glob1(self, pattern, ondisk=True, source=False, strings=False):
919 return self.disambiguate()._glob1(pattern, ondisk, source, strings)
920 921 # This is for later so we can differentiate between Entry the class and Entry 922 # the method of the FS class. 923 _classEntry = Entry 924 925
926 -class LocalFS:
927 928 if SCons.Memoize.use_memoizer: 929 __metaclass__ = SCons.Memoize.Memoized_Metaclass 930 931 # This class implements an abstraction layer for operations involving 932 # a local file system. Essentially, this wraps any function in 933 # the os, os.path or shutil modules that we use to actually go do 934 # anything with or to the local file system. 935 # 936 # Note that there's a very good chance we'll refactor this part of 937 # the architecture in some way as we really implement the interface(s) 938 # for remote file system Nodes. For example, the right architecture 939 # might be to have this be a subclass instead of a base class. 940 # Nevertheless, we're using this as a first step in that direction. 941 # 942 # We're not using chdir() yet because the calling subclass method 943 # needs to use os.chdir() directly to avoid recursion. Will we 944 # really need this one? 945 #def chdir(self, path): 946 # return os.chdir(path)
947 - def chmod(self, path, mode):
948 return os.chmod(path, mode)
949 - def copy(self, src, dst):
950 return shutil.copy(src, dst)
951 - def copy2(self, src, dst):
952 return shutil.copy2(src, dst)
953 - def exists(self, path):
954 return os.path.exists(path)
955 - def getmtime(self, path):
956 return os.path.getmtime(path)
957 - def getsize(self, path):
958 return os.path.getsize(path)
959 - def isdir(self, path):
960 return os.path.isdir(path)
961 - def isfile(self, path):
962 return os.path.isfile(path)
965 - def lstat(self, path):
966 return os.lstat(path)
967 - def listdir(self, path):
968 return os.listdir(path)
969 - def makedirs(self, path):
970 return os.makedirs(path)
971 - def mkdir(self, path):
972 return os.mkdir(path)
973 - def rename(self, old, new):
974 return os.rename(old, new)
975 - def stat(self, path):
976 return os.stat(path)
979 - def open(self, path):
980 return open(path)
983 984 if hasattr(os, 'symlink'): 987 else: 990 991 if hasattr(os, 'readlink'): 994 else:
997 998 999 #class RemoteFS: 1000 # # Skeleton for the obvious methods we might need from the 1001 # # abstraction layer for a remote filesystem. 1002 # def upload(self, local_src, remote_dst): 1003 # pass 1004 # def download(self, remote_src, local_dst): 1005 # pass 1006 1007
1008 -class FS(LocalFS):
1009 1010 memoizer_counters = [] 1011
1012 - def __init__(self, path = None):
1013 """Initialize the Node.FS subsystem. 1014 1015 The supplied path is the top of the source tree, where we 1016 expect to find the top-level build file. If no path is 1017 supplied, the current directory is the default. 1018 1019 The path argument must be a valid absolute path. 1020 """ 1021 if __debug__: logInstanceCreation(self, 'Node.FS') 1022 1023 self._memo = {} 1024 1025 self.Root = {} 1026 self.SConstruct_dir = None 1027 self.max_drift = default_max_drift 1028 1029 self.Top = None 1030 if path is None: 1031 self.pathTop = os.getcwd() 1032 else: 1033 self.pathTop = path 1034 self.defaultDrive = _my_normcase(os.path.splitdrive(self.pathTop)[0]) 1035 1036 self.Top = self.Dir(self.pathTop) 1037 self.Top.path = '.' 1038 self.Top.tpath = '.' 1039 self._cwd = self.Top 1040 1041 DirNodeInfo.fs = self 1042 FileNodeInfo.fs = self
1043
1044 - def set_SConstruct_dir(self, dir):
1045 self.SConstruct_dir = dir
1046
1047 - def get_max_drift(self):
1048 return self.max_drift
1049
1050 - def set_max_drift(self, max_drift):
1051 self.max_drift = max_drift
1052
1053 - def getcwd(self):
1054 return self._cwd
1055
1056 - def chdir(self, dir, change_os_dir=0):
1057 """Change the current working directory for lookups. 1058 If change_os_dir is true, we will also change the "real" cwd 1059 to match. 1060 """ 1061 curr=self._cwd 1062 try: 1063 if not dir is None: 1064 self._cwd = dir 1065 if change_os_dir: 1066 os.chdir(dir.abspath) 1067 except OSError: 1068 self._cwd = curr 1069 raise
1070
1071 - def get_root(self, drive):
1072 """ 1073 Returns the root directory for the specified drive, creating 1074 it if necessary. 1075 """ 1076 drive = _my_normcase(drive) 1077 try: 1078 return self.Root[drive] 1079 except KeyError: 1080 root = RootDir(drive, self) 1081 self.Root[drive] = root 1082 if not drive: 1083 self.Root[self.defaultDrive] = root 1084 elif drive == self.defaultDrive: 1085 self.Root[''] = root 1086 return root
1087
1088 - def _lookup(self, p, directory, fsclass, create=1):
1089 """ 1090 The generic entry point for Node lookup with user-supplied data. 1091 1092 This translates arbitrary input into a canonical Node.FS object 1093 of the specified fsclass. The general approach for strings is 1094 to turn it into a fully normalized absolute path and then call 1095 the root directory's lookup_abs() method for the heavy lifting. 1096 1097 If the path name begins with '#', it is unconditionally 1098 interpreted relative to the top-level directory of this FS. '#' 1099 is treated as a synonym for the top-level SConstruct directory, 1100 much like '~' is treated as a synonym for the user's home 1101 directory in a UNIX shell. So both '#foo' and '#/foo' refer 1102 to the 'foo' subdirectory underneath the top-level SConstruct 1103 directory. 1104 1105 If the path name is relative, then the path is looked up relative 1106 to the specified directory, or the current directory (self._cwd, 1107 typically the SConscript directory) if the specified directory 1108 is None. 1109 """ 1110 if isinstance(p, Base): 1111 # It's already a Node.FS object. Make sure it's the right 1112 # class and return. 1113 p.must_be_same(fsclass) 1114 return p 1115 # str(p) in case it's something like a proxy object 1116 p = str(p) 1117 1118 initial_hash = (p[0:1] == '#') 1119 if initial_hash: 1120 # There was an initial '#', so we strip it and override 1121 # whatever directory they may have specified with the 1122 # top-level SConstruct directory. 1123 p = p[1:] 1124 directory = self.Top 1125 1126 if directory and not isinstance(directory, Dir): 1127 directory = self.Dir(directory) 1128 1129 if do_splitdrive: 1130 drive, p = os.path.splitdrive(p) 1131 else: 1132 drive = '' 1133 if drive and not p: 1134 # This causes a naked drive letter to be treated as a synonym 1135 # for the root directory on that drive. 1136 p = os.sep 1137 absolute = os.path.isabs(p) 1138 1139 needs_normpath = needs_normpath_check.match(p) 1140 1141 if initial_hash or not absolute: 1142 # This is a relative lookup, either to the top-level 1143 # SConstruct directory (because of the initial '#') or to 1144 # the current directory (the path name is not absolute). 1145 # Add the string to the appropriate directory lookup path, 1146 # after which the whole thing gets normalized. 1147 if not directory: 1148 directory = self._cwd 1149 if p: 1150 p = directory.labspath + '/' + p 1151 else: 1152 p = directory.labspath 1153 1154 if needs_normpath: 1155 p = os.path.normpath(p) 1156 1157 if drive or absolute: 1158 root = self.get_root(drive) 1159 else: 1160 if not directory: 1161 directory = self._cwd 1162 root = directory.root 1163 1164 if os.sep != '/': 1165 p = string.replace(p, os.sep, '/') 1166 return root._lookup_abs(p, fsclass, create)
1167
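The '#' convention described in the _lookup() docstring above is the same one exposed to SConscript files. A minimal usage sketch; the path names are illustrative:

    # SConscript (sketch): '#' anchors a path at the top-level SConstruct
    # directory, regardless of which SConscript is currently being read.
    env = Environment(CPPPATH=['#include', '#/extern/include'])
    cfg = File('#config/site.cfg')      # same file from any subdirectory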
1168 - def Entry(self, name, directory = None, create = 1):
1169 """Lookup or create a generic Entry node with the specified name. 1170 If the name is a relative path (begins with ./, ../, or a file 1171 name), then it is looked up relative to the supplied directory 1172 node, or to the top level directory of the FS (supplied at 1173 construction time) if no directory is supplied. 1174 """ 1175 return self._lookup(name, directory, Entry, create)
1176
1177 - def File(self, name, directory = None, create = 1):
1178 """Lookup or create a File node with the specified name. If 1179 the name is a relative path (begins with ./, ../, or a file name), 1180 then it is looked up relative to the supplied directory node, 1181 or to the top level directory of the FS (supplied at construction 1182 time) if no directory is supplied. 1183 1184 This method will raise TypeError if a directory is found at the 1185 specified path. 1186 """ 1187 return self._lookup(name, directory, File, create)
1188
1189 - def Dir(self, name, directory = None, create = True):
1190 """Lookup or create a Dir node with the specified name. If 1191 the name is a relative path (begins with ./, ../, or a file name), 1192 then it is looked up relative to the supplied directory node, 1193 or to the top level directory of the FS (supplied at construction 1194 time) if no directory is supplied. 1195 1196 This method will raise TypeError if a normal file is found at the 1197 specified path. 1198 """ 1199 return self._lookup(name, directory, Dir, create)
1200
1201 - def VariantDir(self, variant_dir, src_dir, duplicate=1):
1202 """Link the supplied variant directory to the source directory 1203 for purposes of building files.""" 1204 1205 if not isinstance(src_dir, SCons.Node.Node): 1206 src_dir = self.Dir(src_dir) 1207 if not isinstance(variant_dir, SCons.Node.Node): 1208 variant_dir = self.Dir(variant_dir) 1209 if src_dir.is_under(variant_dir): 1210 raise SCons.Errors.UserError, "Source directory cannot be under variant directory." 1211 if variant_dir.srcdir: 1212 if variant_dir.srcdir == src_dir: 1213 return # We already did this. 1214 raise SCons.Errors.UserError, "'%s' already has a source directory: '%s'."%(variant_dir, variant_dir.srcdir) 1215 variant_dir.link(src_dir, duplicate)
1216
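At the user level this is the VariantDir() call; a short sketch with illustrative directory names:

    # SConstruct (sketch): link 'build' to 'src' so targets are built in
    # the variant tree; duplicate=0 builds without copying sources.
    VariantDir('build', 'src', duplicate=0)
    SConscript('build/SConscript')      # actually read from src/SConscript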
1217 - def Repository(self, *dirs):
1218 """Specify Repository directories to search.""" 1219 for d in dirs: 1220 if not isinstance(d, SCons.Node.Node): 1221 d = self.Dir(d) 1222 self.Top.addRepository(d)
1223
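The corresponding user-level call is Repository(); a short sketch with an illustrative path:

    # SConstruct (sketch): search a read-only checked-out tree for any
    # source or derived file not found in the local directory hierarchy.
    Repository('/usr/local/repository/myproject')
    SConscript('src/SConscript')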
1224 - def variant_dir_target_climb(self, orig, dir, tail):
1225 """Create targets in corresponding variant directories 1226 1227 Climb the directory tree, and look up path names 1228 relative to any linked variant directories we find. 1229 1230 Even though this loops and walks up the tree, we don't memoize 1231 the return value because this is really only used to process 1232 the command-line targets. 1233 """ 1234 targets = [] 1235 message = None 1236 fmt = "building associated VariantDir targets: %s" 1237 start_dir = dir 1238 while dir: 1239 for bd in dir.variant_dirs: 1240 if start_dir.is_under(bd): 1241 # If already in the build-dir location, don't reflect 1242 return [orig], fmt % str(orig) 1243 p = apply(os.path.join, [bd.path] + tail) 1244 targets.append(self.Entry(p)) 1245 tail = [dir.name] + tail 1246 dir = dir.up() 1247 if targets: 1248 message = fmt % string.join(map(str, targets)) 1249 return targets, message
1250
1251 - def Glob(self, pathname, ondisk=True, source=True, strings=False, cwd=None):
1252 """ 1253 Globs 1254 1255 This is mainly a shim layer 1256 """ 1257 if cwd is None: 1258 cwd = self.getcwd() 1259 return cwd.glob(pathname, ondisk, source, strings)
1260
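From an SConscript this surfaces as the Glob() function; a small sketch:

    # SConscript (sketch): Glob() matches on-disk entries and in-memory
    # Nodes, taking Repository and VariantDir directories into account.
    env = Environment()
    sources = Glob('*.c')                   # a list of File Nodes
    names = Glob('*.c', strings=True)       # the same matches as strings
    env.Program('app', sources)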
1261 -class DirNodeInfo(SCons.Node.NodeInfoBase):
1262 # This should get reset by the FS initialization. 1263 current_version_id = 1 1264 1265 fs = None 1266
1267 - def str_to_node(self, s):
1268 top = self.fs.Top 1269 root = top.root 1270 if do_splitdrive: 1271 drive, s = os.path.splitdrive(s) 1272 if drive: 1273 root = self.fs.get_root(drive) 1274 if not os.path.isabs(s): 1275 s = top.labspath + '/' + s 1276 return root._lookup_abs(s, Entry)
1277
1278 -class DirBuildInfo(SCons.Node.BuildInfoBase):
1279 current_version_id = 1
1280 1281 glob_magic_check = re.compile('[*?[]') 1282
1283 -def has_glob_magic(s):
1284 return glob_magic_check.search(s) is not None
1285
1286 -class Dir(Base):
1287 """A class for directories in a file system. 1288 """ 1289 1290 memoizer_counters = [] 1291 1292 NodeInfo = DirNodeInfo 1293 BuildInfo = DirBuildInfo 1294
1295 - def __init__(self, name, directory, fs):
1296 if __debug__: logInstanceCreation(self, 'Node.FS.Dir') 1297 Base.__init__(self, name, directory, fs) 1298 self._morph()
1299
1300 - def _morph(self):
1301 """Turn a file system Node (either a freshly initialized directory 1302 object or a separate Entry object) into a proper directory object. 1303 1304 Set up this directory's entries and hook it into the file 1305 system tree. Specify that directories (this Node) don't use 1306 signatures for calculating whether they're current. 1307 """ 1308 1309 self.repositories = [] 1310 self.srcdir = None 1311 1312 self.entries = {} 1313 self.entries['.'] = self 1314 self.entries['..'] = self.dir 1315 self.cwd = self 1316 self.searched = 0 1317 self._sconsign = None 1318 self.variant_dirs = [] 1319 self.root = self.dir.root 1320 1321 # Don't just reset the executor, replace its action list, 1322 # because it might have some pre-or post-actions that need to 1323 # be preserved. 1324 self.builder = get_MkdirBuilder() 1325 self.get_executor().set_action_list(self.builder.action)
1326
1327 - def diskcheck_match(self):
1328 diskcheck_match(self, self.isfile, 1329 "File %s found where directory expected.")
1330
1331 - def __clearRepositoryCache(self, duplicate=None):
1332 """Called when we change the repository(ies) for a directory. 1333 This clears any cached information that is invalidated by changing 1334 the repository.""" 1335 1336 for node in self.entries.values(): 1337 if node != self.dir: 1338 if node != self and isinstance(node, Dir): 1339 node.__clearRepositoryCache(duplicate) 1340 else: 1341 node.clear() 1342 try: 1343 del node._srcreps 1344 except AttributeError: 1345 pass 1346 if duplicate != None: 1347 node.duplicate=duplicate
1348
1349 - def __resetDuplicate(self, node):
1350 if node != self: 1351 node.duplicate = node.get_dir().duplicate
1352
1353 - def Entry(self, name):
1354 """ 1355 Looks up or creates an entry node named 'name' relative to 1356 this directory. 1357 """ 1358 return self.fs.Entry(name, self)
1359
1360 - def Dir(self, name, create=True):
1361 """ 1362 Looks up or creates a directory node named 'name' relative to 1363 this directory. 1364 """ 1365 dir = self.fs.Dir(name, self, create) 1366 return dir
1367
1368 - def File(self, name):
1369 """ 1370 Looks up or creates a file node named 'name' relative to 1371 this directory. 1372 """ 1373 return self.fs.File(name, self)
1374
1375 - def _lookup_rel(self, name, klass, create=1):
1376 """ 1377 Looks up a *normalized* relative path name, relative to this 1378 directory. 1379 1380 This method is intended for use by internal lookups with 1381 already-normalized path data. For general-purpose lookups, 1382 use the Entry(), Dir() and File() methods above. 1383 1384 This method does *no* input checking and will die or give 1385 incorrect results if it's passed a non-normalized path name (e.g., 1386 a path containing '..'), an absolute path name, a top-relative 1387 ('#foo') path name, or any kind of object. 1388 """ 1389 name = self.entry_labspath(name) 1390 return self.root._lookup_abs(name, klass, create)
1391 1399
1400 - def getRepositories(self):
1401 """Returns a list of repositories for this directory. 1402 """ 1403 if self.srcdir and not self.duplicate: 1404 return self.srcdir.get_all_rdirs() + self.repositories 1405 return self.repositories
1406 1407 memoizer_counters.append(SCons.Memoize.CountValue('get_all_rdirs')) 1408
1409 - def get_all_rdirs(self):
1410 try: 1411 return self._memo['get_all_rdirs'] 1412 except KeyError: 1413 pass 1414 1415 result = [self] 1416 fname = '.' 1417 dir = self 1418 while dir: 1419 for rep in dir.getRepositories(): 1420 result.append(rep.Dir(fname)) 1421 if fname == '.': 1422 fname = dir.name 1423 else: 1424 fname = dir.name + os.sep + fname 1425 dir = dir.up() 1426 1427 self._memo['get_all_rdirs'] = result 1428 1429 return result
1430
1431 - def addRepository(self, dir):
1432 if dir != self and not dir in self.repositories: 1433 self.repositories.append(dir) 1434 dir.tpath = '.' 1435 self.__clearRepositoryCache()
1436
1437 - def up(self):
1438 return self.entries['..']
1439
1440 - def _rel_path_key(self, other):
1441 return str(other)
1442 1443 memoizer_counters.append(SCons.Memoize.CountDict('rel_path', _rel_path_key)) 1444
1445 - def rel_path(self, other):
1446 """Return a path to "other" relative to this directory. 1447 """ 1448 1449 # This complicated and expensive method, which constructs relative 1450 # paths between arbitrary Node.FS objects, is no longer used 1451 # by SCons itself. It was introduced to store dependency paths 1452 # in .sconsign files relative to the target, but that ended up 1453 # being significantly inefficient. 1454 # 1455 # We're continuing to support the method because some SConstruct 1456 # files out there started using it when it was available, and 1457 # we're all about backwards compatibility.. 1458 1459 try: 1460 memo_dict = self._memo['rel_path'] 1461 except KeyError: 1462 memo_dict = {} 1463 self._memo['rel_path'] = memo_dict 1464 else: 1465 try: 1466 return memo_dict[other] 1467 except KeyError: 1468 pass 1469 1470 if self is other: 1471 1472 result = '.' 1473 1474 elif not other in self.path_elements: 1475 1476 try: 1477 other_dir = other.get_dir() 1478 except AttributeError: 1479 result = str(other) 1480 else: 1481 if other_dir is None: 1482 result = other.name 1483 else: 1484 dir_rel_path = self.rel_path(other_dir) 1485 if dir_rel_path == '.': 1486 result = other.name 1487 else: 1488 result = dir_rel_path + os.sep + other.name 1489 1490 else: 1491 1492 i = self.path_elements.index(other) + 1 1493 1494 path_elems = ['..'] * (len(self.path_elements) - i) \ 1495 + map(lambda n: n.name, other.path_elements[i:]) 1496 1497 result = string.join(path_elems, os.sep) 1498 1499 memo_dict[other] = result 1500 1501 return result
1502
1503 - def get_env_scanner(self, env, kw={}):
1504 import SCons.Defaults 1505 return SCons.Defaults.DirEntryScanner
1506
1507 - def get_target_scanner(self):
1508 import SCons.Defaults 1509 return SCons.Defaults.DirEntryScanner
1510
1511 - def get_found_includes(self, env, scanner, path):
1512 """Return this directory's implicit dependencies. 1513 1514 We don't bother caching the results because the scan typically 1515 shouldn't be requested more than once (as opposed to scanning 1516 .h file contents, which can be requested as many times as the 1517 files is #included by other files). 1518 """ 1519 if not scanner: 1520 return [] 1521 # Clear cached info for this Dir. If we already visited this 1522 # directory on our walk down the tree (because we didn't know at 1523 # that point it was being used as the source for another Node) 1524 # then we may have calculated build signature before realizing 1525 # we had to scan the disk. Now that we have to, though, we need 1526 # to invalidate the old calculated signature so that any node 1527 # dependent on our directory structure gets one that includes 1528 # info about everything on disk. 1529 self.clear() 1530 return scanner(self, env, path)
1531 1532 # 1533 # Taskmaster interface subsystem 1534 # 1535
1536 - def prepare(self):
1537 pass
1538
1539 - def build(self, **kw):
1540 """A null "builder" for directories.""" 1541 global MkdirBuilder 1542 if not self.builder is MkdirBuilder: 1543 apply(SCons.Node.Node.build, [self,], kw)
1544 1545 # 1546 # 1547 # 1548
1549 - def _create(self):
1550 """Create this directory, silently and without worrying about 1551 whether the builder is the default or not.""" 1552 listDirs = [] 1553 parent = self 1554 while parent: 1555 if parent.exists(): 1556 break 1557 listDirs.append(parent) 1558 p = parent.up() 1559 if p is None: 1560 raise SCons.Errors.StopError, parent.path 1561 parent = p 1562 listDirs.reverse() 1563 for dirnode in listDirs: 1564 try: 1565 # Don't call dirnode.build(), call the base Node method 1566 # directly because we definitely *must* create this 1567 # directory. The dirnode.build() method will suppress 1568 # the build if it's the default builder. 1569 SCons.Node.Node.build(dirnode) 1570 dirnode.get_executor().nullify() 1571 # The build() action may or may not have actually 1572 # created the directory, depending on whether the -n 1573 # option was used or not. Delete the _exists and 1574 # _rexists attributes so they can be reevaluated. 1575 dirnode.clear() 1576 except OSError: 1577 pass
1578
1580 global MkdirBuilder 1581 return not self.builder is MkdirBuilder and self.has_builder()
1582
1583 - def alter_targets(self):
1584 """Return any corresponding targets in a variant directory. 1585 """ 1586 return self.fs.variant_dir_target_climb(self, self, [])
1587
1588 - def scanner_key(self):
1589 """A directory does not get scanned.""" 1590 return None
1591
1592 - def get_contents(self):
1593 """Return aggregate contents of all our children.""" 1594 contents = map(lambda n: n.get_contents(), self.children()) 1595 return string.join(contents, '')
1596
1597 - def do_duplicate(self, src):
1598 pass
1599 1600 changed_since_last_build = SCons.Node.Node.state_has_changed 1601
1602 - def is_up_to_date(self):
1603 """If any child is not up-to-date, then this directory isn't, 1604 either.""" 1605 if not self.builder is MkdirBuilder and not self.exists(): 1606 return 0 1607 up_to_date = SCons.Node.up_to_date 1608 for kid in self.children(): 1609 if kid.get_state() > up_to_date: 1610 return 0 1611 return 1
1612
1613 - def rdir(self):
1614 if not self.exists(): 1615 norm_name = _my_normcase(self.name) 1616 for dir in self.dir.get_all_rdirs(): 1617 try: node = dir.entries[norm_name] 1618 except KeyError: node = dir.dir_on_disk(self.name) 1619 if node and node.exists() and \ 1620 (isinstance(dir, Dir) or isinstance(dir, Entry)): 1621 return node 1622 return self
1623
1624 - def sconsign(self):
1625 """Return the .sconsign file info for this directory, 1626 creating it first if necessary.""" 1627 if not self._sconsign: 1628 import SCons.SConsign 1629 self._sconsign = SCons.SConsign.ForDirectory(self) 1630 return self._sconsign
1631
1632 - def srcnode(self):
1633 """Dir has a special need for srcnode()...if we 1634 have a srcdir attribute set, then that *is* our srcnode.""" 1635 if self.srcdir: 1636 return self.srcdir 1637 return Base.srcnode(self)
1638
1639 - def get_timestamp(self):
1640 """Return the latest timestamp from among our children""" 1641 stamp = 0 1642 for kid in self.children(): 1643 if kid.get_timestamp() > stamp: 1644 stamp = kid.get_timestamp() 1645 return stamp
1646
1647 - def entry_abspath(self, name):
1648 return self.abspath + os.sep + name
1649
1650 - def entry_labspath(self, name):
1651 return self.labspath + '/' + name
1652
1653 - def entry_path(self, name):
1654 return self.path + os.sep + name
1655
1656 - def entry_tpath(self, name):
1657 return self.tpath + os.sep + name
1658
1659 - def entry_exists_on_disk(self, name):
1660 try: 1661 d = self.on_disk_entries 1662 except AttributeError: 1663 d = {} 1664 try: 1665 entries = os.listdir(self.abspath) 1666 except OSError: 1667 pass 1668 else: 1669 for entry in map(_my_normcase, entries): 1670 d[entry] = 1 1671 self.on_disk_entries = d 1672 return d.has_key(_my_normcase(name))
1673 1674 memoizer_counters.append(SCons.Memoize.CountValue('srcdir_list')) 1675
1676 - def srcdir_list(self):
1677 try: 1678 return self._memo['srcdir_list'] 1679 except KeyError: 1680 pass 1681 1682 result = [] 1683 1684 dirname = '.' 1685 dir = self 1686 while dir: 1687 if dir.srcdir: 1688 result.append(dir.srcdir.Dir(dirname)) 1689 dirname = dir.name + os.sep + dirname 1690 dir = dir.up() 1691 1692 self._memo['srcdir_list'] = result 1693 1694 return result
1695
1696 - def srcdir_duplicate(self, name):
1697 for dir in self.srcdir_list(): 1698 if self.is_under(dir): 1699 # We shouldn't source from something in the build path; 1700 # variant_dir is probably under src_dir, in which case 1701 # we are reflecting. 1702 break 1703 if dir.entry_exists_on_disk(name): 1704 srcnode = dir.Entry(name).disambiguate() 1705 if self.duplicate: 1706 node = self.Entry(name).disambiguate() 1707 node.do_duplicate(srcnode) 1708 return node 1709 else: 1710 return srcnode 1711 return None
1712
1713 - def _srcdir_find_file_key(self, filename):
1714 return filename
1715 1716 memoizer_counters.append(SCons.Memoize.CountDict('srcdir_find_file', _srcdir_find_file_key)) 1717
1718 - def srcdir_find_file(self, filename):
1719 try: 1720 memo_dict = self._memo['srcdir_find_file'] 1721 except KeyError: 1722 memo_dict = {} 1723 self._memo['srcdir_find_file'] = memo_dict 1724 else: 1725 try: 1726 return memo_dict[filename] 1727 except KeyError: 1728 pass 1729 1730 def func(node): 1731 if (isinstance(node, File) or isinstance(node, Entry)) and \ 1732 (node.is_derived() or node.exists()): 1733 return node 1734 return None
1735 1736 norm_name = _my_normcase(filename) 1737 1738 for rdir in self.get_all_rdirs(): 1739 try: node = rdir.entries[norm_name] 1740 except KeyError: node = rdir.file_on_disk(filename) 1741 else: node = func(node) 1742 if node: 1743 result = (node, self) 1744 memo_dict[filename] = result 1745 return result 1746 1747 for srcdir in self.srcdir_list(): 1748 for rdir in srcdir.get_all_rdirs(): 1749 try: node = rdir.entries[norm_name] 1750 except KeyError: node = rdir.file_on_disk(filename) 1751 else: node = func(node) 1752 if node: 1753 result = (File(filename, self, self.fs), srcdir) 1754 memo_dict[filename] = result 1755 return result 1756 1757 result = (None, None) 1758 memo_dict[filename] = result 1759 return result
1760
1761 - def dir_on_disk(self, name):
1762 if self.entry_exists_on_disk(name): 1763 try: return self.Dir(name) 1764 except TypeError: pass 1765 return None
1766
1767 - def file_on_disk(self, name):
1768 if self.entry_exists_on_disk(name) or \ 1769 diskcheck_rcs(self, name) or \ 1770 diskcheck_sccs(self, name): 1771 try: return self.File(name) 1772 except TypeError: pass 1773 node = self.srcdir_duplicate(name) 1774 if isinstance(node, Dir): 1775 node = None 1776 return node
1777
1778 - def walk(self, func, arg):
1779 """ 1780 Walk this directory tree by calling the specified function 1781 for each directory in the tree. 1782 1783 This behaves like the os.path.walk() function, but for in-memory 1784 Node.FS.Dir objects. The function takes the same arguments as 1785 the functions passed to os.path.walk(): 1786 1787 func(arg, dirname, fnames) 1788 1789 Except that "dirname" will actually be the directory *Node*, 1790 not the string. The '.' and '..' entries are excluded from 1791 fnames. The fnames list may be modified in-place to filter the 1792 subdirectories visited or otherwise impose a specific order. 1793 The "arg" argument is always passed to func() and may be used 1794 in any way (or ignored, passing None is common). 1795 """ 1796 entries = self.entries 1797 names = entries.keys() 1798 names.remove('.') 1799 names.remove('..') 1800 func(arg, self, names) 1801 select_dirs = lambda n, e=entries: isinstance(e[n], Dir) 1802 for dirname in filter(select_dirs, names): 1803 entries[dirname].walk(func, arg)
1804
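A small sketch of the callback protocol described in the walk() docstring above, collecting every in-memory directory Node below a starting point (collect_dirs is an illustrative name):

    # Sketch: gather all in-memory Dir Nodes below a directory, using the
    # os.path.walk()-style callback that Dir.walk() expects.
    def collect_dirs(arg, dir_node, names):
        arg.append(dir_node)

    all_dirs = []
    Dir('.').walk(collect_dirs, all_dirs)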
1805 - def glob(self, pathname, ondisk=True, source=False, strings=False):
1806 """ 1807 Returns a list of Nodes (or strings) matching a specified 1808 pathname pattern. 1809 1810 Pathname patterns follow UNIX shell semantics: * matches 1811 any-length strings of any characters, ? matches any character, 1812 and [] can enclose lists or ranges of characters. Matches do 1813 not span directory separators. 1814 1815 The matches take into account Repositories, returning local 1816 Nodes if a corresponding entry exists in a Repository (either 1817 an in-memory Node or something on disk). 1818 1819 By defafult, the glob() function matches entries that exist 1820 on-disk, in addition to in-memory Nodes. Setting the "ondisk" 1821 argument to False (or some other non-true value) causes the glob() 1822 function to only match in-memory Nodes. The default behavior is 1823 to return both the on-disk and in-memory Nodes. 1824 1825 The "source" argument, when true, specifies that corresponding 1826 source Nodes must be returned if you're globbing in a build 1827 directory (initialized with VariantDir()). The default behavior 1828 is to return Nodes local to the VariantDir(). 1829 1830 The "strings" argument, when true, returns the matches as strings, 1831 not Nodes. The strings are path names relative to this directory. 1832 1833 The underlying algorithm is adapted from the glob.glob() function 1834 in the Python library (but heavily modified), and uses fnmatch() 1835 under the covers. 1836 """ 1837 dirname, basename = os.path.split(pathname) 1838 if not dirname: 1839 return self._glob1(basename, ondisk, source, strings) 1840 if has_glob_magic(dirname): 1841 list = self.glob(dirname, ondisk, source, strings=False) 1842 else: 1843 list = [self.Dir(dirname, create=True)] 1844 result = [] 1845 for dir in list: 1846 r = dir._glob1(basename, ondisk, source, strings) 1847 if strings: 1848 r = map(lambda x, d=str(dir): os.path.join(d, x), r) 1849 result.extend(r) 1850 return result
1851
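A hedged usage sketch for glob(); the directory and patterns are assumptions, and in SConscript files this method is normally reached through the Glob() wrapper rather than called directly:

    import SCons.Node.FS

    src = SCons.Node.FS.get_default_fs().Dir('src')     # hypothetical directory
    cfiles  = src.glob('*.c')                           # Nodes: on-disk and in-memory
    headers = src.glob('include/*.h', strings=True)     # path strings relative to src
    parsers = src.glob('*.y', ondisk=False)             # in-memory Nodes only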
1852 - def _glob1(self, pattern, ondisk=True, source=False, strings=False):
1853 """ 1854 Globs for and returns a list of entry names matching a single 1855 pattern in this directory. 1856 1857 This searches any repositories and source directories for 1858 corresponding entries and returns a Node (or string) relative 1859 to the current directory if an entry is found anywhere. 1860 1861 TODO: handle pattern with no wildcard 1862 """ 1863 search_dir_list = self.get_all_rdirs() 1864 for srcdir in self.srcdir_list(): 1865 search_dir_list.extend(srcdir.get_all_rdirs()) 1866 1867 names = [] 1868 for dir in search_dir_list: 1869 # We use the .name attribute from the Node because the keys of 1870 # the dir.entries dictionary are normalized (that is, all upper 1871 # case) on case-insensitive systems like Windows. 1872 #node_names = [ v.name for k, v in dir.entries.items() if k not in ('.', '..') ] 1873 entry_names = filter(lambda n: n not in ('.', '..'), dir.entries.keys()) 1874 node_names = map(lambda n, e=dir.entries: e[n].name, entry_names) 1875 names.extend(node_names) 1876 if ondisk: 1877 try: 1878 disk_names = os.listdir(dir.abspath) 1879 except os.error: 1880 pass 1881 else: 1882 names.extend(disk_names) 1883 if not strings: 1884 # We're going to return corresponding Nodes in 1885 # the local directory, so we need to make sure 1886 # those Nodes exist. We only want to create 1887 # Nodes for the entries that will match the 1888 # specified pattern, though, which means we 1889 # need to filter the list here, even though 1890 # the overall list will also be filtered later, 1891 # after we exit this loop. 1892 if pattern[0] != '.': 1893 #disk_names = [ d for d in disk_names if d[0] != '.' ] 1894 disk_names = filter(lambda x: x[0] != '.', disk_names) 1895 disk_names = fnmatch.filter(disk_names, pattern) 1896 rep_nodes = map(dir.Entry, disk_names) 1897 #rep_nodes = [ n.disambiguate() for n in rep_nodes ] 1898 rep_nodes = map(lambda n: n.disambiguate(), rep_nodes) 1899 for node, name in izip(rep_nodes, disk_names): 1900 n = self.Entry(name) 1901 if n.__class__ != node.__class__: 1902 n.__class__ = node.__class__ 1903 n._morph() 1904 1905 names = set(names) 1906 if pattern[0] != '.': 1907 #names = [ n for n in names if n[0] != '.' ] 1908 names = filter(lambda x: x[0] != '.', names) 1909 names = fnmatch.filter(names, pattern) 1910 1911 if strings: 1912 return names 1913 1914 #return [ self.entries[_my_normcase(n)] for n in names ] 1915 return map(lambda n, e=self.entries: e[_my_normcase(n)], names)
1916
1917 -class RootDir(Dir):
1918 """A class for the root directory of a file system. 1919 1920 This is the same as a Dir class, except that the path separator 1921 ('/' or '\\') is actually part of the name, so we don't need to 1922 add a separator when creating the path names of entries within 1923 this directory. 1924 """
1925 - def __init__(self, name, fs):
1926          if __debug__: logInstanceCreation(self, 'Node.FS.RootDir')
1927          # We're going to be our own parent directory (".." entry and .dir
1928          # attribute) so we have to set up some values so Base.__init__()
1929          # won't gag when it calls some of our methods.
1930          self.abspath = ''
1931          self.labspath = ''
1932          self.path = ''
1933          self.tpath = ''
1934          self.path_elements = []
1935          self.duplicate = 0
1936          self.root = self
1937          Base.__init__(self, name, self, fs)
1938 
1939          # Now set our paths to what we really want them to be: the
1940          # initial drive letter (the name) plus the directory separator,
1941          # except for the "lookup abspath," which does not have the
1942          # drive letter.
1943          self.abspath = name + os.sep
1944          self.labspath = ''
1945          self.path = name + os.sep
1946          self.tpath = name + os.sep
1947          self._morph()
1948 
1949          self._lookupDict = {}
1950 
1951          # The // and os.sep + os.sep entries are necessary because
1952          # os.path.normpath() seems to preserve double slashes at the
1953          # beginning of a path (presumably for UNC path names), but
1954          # collapses triple slashes to a single slash.
1955          self._lookupDict[''] = self
1956          self._lookupDict['/'] = self
1957          self._lookupDict['//'] = self
1958          self._lookupDict[os.sep] = self
1959          self._lookupDict[os.sep + os.sep] = self
1960
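The comment above about leading slashes is easy to verify in isolation; a minimal sketch of the POSIX-path behavior that makes the extra '//' entries necessary (Windows normpath behaves differently):

    import posixpath

    # Exactly two leading slashes are preserved (their meaning is
    # implementation-defined per POSIX); three or more collapse to one.
    assert posixpath.normpath('//usr/bin') == '//usr/bin'
    assert posixpath.normpath('///usr/bin') == '/usr/bin'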
1961 - def must_be_same(self, klass):
1962 if klass is Dir: 1963 return 1964 Base.must_be_same(self, klass)
1965
1966 - def _lookup_abs(self, p, klass, create=1):
1967          """
1968          Fast (?) lookup of a *normalized* absolute path.
1969 
1970          This method is intended for use by internal lookups with
1971          already-normalized path data. For general-purpose lookups,
1972          use the FS.Entry(), FS.Dir() or FS.File() methods.
1973 
1974          The caller is responsible for making sure we're passed a
1975          normalized absolute path; we merely let Python's dictionary look
1976          up and return the One True Node.FS object for the path.
1977 
1978          If a Node for the specified "p" doesn't already exist, and
1979          "create" is specified, the Node may be created after recursive
1980          invocation to find or create the parent directory or directories.
1981          """
1982          k = _my_normcase(p)
1983          try:
1984              result = self._lookupDict[k]
1985          except KeyError:
1986              if not create:
1987                  raise SCons.Errors.UserError
1988              # There is no Node for this path name, and we're allowed
1989              # to create it.
1990              dir_name, file_name = os.path.split(p)
1991              dir_node = self._lookup_abs(dir_name, Dir)
1992              result = klass(file_name, dir_node, self.fs)
1993              self._lookupDict[k] = result
1994              dir_node.entries[_my_normcase(file_name)] = result
1995              dir_node.implicit = None
1996 
1997              # Double-check on disk (as configured) that the Node we
1998              # created matches whatever is out there in the real world.
1999              result.diskcheck_match()
2000          else:
2001              # There is already a Node for this path name. Allow it to
2002              # complain if we were looking for an inappropriate type.
2003              result.must_be_same(klass)
2004          return result
2005
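The lookup-or-create pattern in _lookup_abs() can be sketched on its own; this is a deliberately simplified, hypothetical stand-in (no normcasing, no disk checks, no type disambiguation), not the method itself:

    import os.path

    class PathInterner:
        """Toy analogue of RootDir._lookupDict: one object per absolute path."""
        def __init__(self):
            self._cache = {'/': object()}        # the root is its own parent

        def lookup(self, p, create=True):
            try:
                return self._cache[p]            # the common, memoized case
            except KeyError:
                if not create:
                    raise
                parent = os.path.dirname(p)
                self.lookup(parent)              # find or create ancestors first
                node = self._cache[p] = object() # stand-in for klass(name, dir, fs)
                return node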
2006 - def __str__(self):
2007 return self.abspath
2008
2009 - def entry_abspath(self, name):
2010 return self.abspath + name
2011
2012 - def entry_labspath(self, name):
2013 return '/' + name
2014
2015 - def entry_path(self, name):
2016 return self.path + name
2017
2018 - def entry_tpath(self, name):
2019 return self.tpath + name
2020
2021 - def is_under(self, dir):
2022 if self is dir: 2023 return 1 2024 else: 2025 return 0
2026
2027 - def up(self):
2028 return None
2029
2030 - def get_dir(self):
2031 return None
2032
2033 - def src_builder(self):
2034 return _null
2035
2036 -class FileNodeInfo(SCons.Node.NodeInfoBase):
2037 current_version_id = 1 2038 2039 field_list = ['csig', 'timestamp', 'size'] 2040 2041 # This should get reset by the FS initialization. 2042 fs = None 2043
2044 - def str_to_node(self, s):
2045 top = self.fs.Top 2046 root = top.root 2047 if do_splitdrive: 2048 drive, s = os.path.splitdrive(s) 2049 if drive: 2050 root = self.fs.get_root(drive) 2051 if not os.path.isabs(s): 2052 s = top.labspath + '/' + s 2053 return root._lookup_abs(s, Entry)
2054
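str_to_node() leans on os.path.splitdrive() to decide which root to search; a quick demonstration of the values it has to cope with on the two path flavors:

    import ntpath, posixpath

    # Only Windows-style paths carry a drive component; on POSIX the
    # drive is always empty and the default root is used.
    assert ntpath.splitdrive('C:\\build\\foo.obj') == ('C:', '\\build\\foo.obj')
    assert posixpath.splitdrive('/build/foo.o') == ('', '/build/foo.o')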
2055 -class FileBuildInfo(SCons.Node.BuildInfoBase):
2056 current_version_id = 1 2057
2058 - def convert_to_sconsign(self):
2059 """ 2060 Converts this FileBuildInfo object for writing to a .sconsign file 2061 2062 This replaces each Node in our various dependency lists with its 2063 usual string representation: relative to the top-level SConstruct 2064 directory, or an absolute path if it's outside. 2065 """ 2066 if os.sep == '/': 2067 node_to_str = str 2068 else: 2069 def node_to_str(n): 2070 try: 2071 s = n.path 2072 except AttributeError: 2073 s = str(n) 2074 else: 2075 s = string.replace(s, os.sep, '/') 2076 return s
2077 for attr in ['bsources', 'bdepends', 'bimplicit']: 2078 try: 2079 val = getattr(self, attr) 2080 except AttributeError: 2081 pass 2082 else: 2083 setattr(self, attr, map(node_to_str, val))
2084 - def convert_from_sconsign(self, dir, name):
2085 """ 2086 Converts a newly-read FileBuildInfo object for in-SCons use 2087 2088 For normal up-to-date checking, we don't have any conversion to 2089 perform--but we're leaving this method here to make that clear. 2090 """ 2091 pass
2092 - def prepare_dependencies(self):
2093 """ 2094 Prepares a FileBuildInfo object for explaining what changed 2095 2096 The bsources, bdepends and bimplicit lists have all been 2097 stored on disk as paths relative to the top-level SConstruct 2098 directory. Convert the strings to actual Nodes (for use by the 2099 --debug=explain code and --implicit-cache). 2100 """ 2101 attrs = [ 2102 ('bsources', 'bsourcesigs'), 2103 ('bdepends', 'bdependsigs'), 2104 ('bimplicit', 'bimplicitsigs'), 2105 ] 2106 for (nattr, sattr) in attrs: 2107 try: 2108 strings = getattr(self, nattr) 2109 nodeinfos = getattr(self, sattr) 2110 except AttributeError: 2111 pass 2112 else: 2113 nodes = [] 2114 for s, ni in izip(strings, nodeinfos): 2115 if not isinstance(s, SCons.Node.Node): 2116 s = ni.str_to_node(s) 2117 nodes.append(s) 2118 setattr(self, nattr, nodes)
2119 - def format(self, names=0):
2120 result = [] 2121 bkids = self.bsources + self.bdepends + self.bimplicit 2122 bkidsigs = self.bsourcesigs + self.bdependsigs + self.bimplicitsigs 2123 for bkid, bkidsig in izip(bkids, bkidsigs): 2124 result.append(str(bkid) + ': ' + 2125 string.join(bkidsig.format(names=names), ' ')) 2126 result.append('%s [%s]' % (self.bactsig, self.bact)) 2127 return string.join(result, '\n')
2128
2129 -class File(Base):
2130 """A class for files in a file system. 2131 """ 2132 2133 memoizer_counters = [] 2134 2135 NodeInfo = FileNodeInfo 2136 BuildInfo = FileBuildInfo 2137
2138 - def diskcheck_match(self):
2139 diskcheck_match(self, self.isdir, 2140 "Directory %s found where file expected.")
2141
2142 - def __init__(self, name, directory, fs):
2143 if __debug__: logInstanceCreation(self, 'Node.FS.File') 2144 Base.__init__(self, name, directory, fs) 2145 self._morph()
2146
2147 - def Entry(self, name):
2148 """Create an entry node named 'name' relative to 2149 the SConscript directory of this file.""" 2150 return self.cwd.Entry(name)
2151
2152 - def Dir(self, name, create=True):
2153 """Create a directory node named 'name' relative to 2154 the SConscript directory of this file.""" 2155 return self.cwd.Dir(name, create)
2156
2157 - def Dirs(self, pathlist):
2158 """Create a list of directories relative to the SConscript 2159 directory of this file.""" 2160 return map(lambda p, s=self: s.Dir(p), pathlist)
2161
2162 - def File(self, name):
2163 """Create a file node named 'name' relative to 2164 the SConscript directory of this file.""" 2165 return self.cwd.File(name)
2166 2167 #def generate_build_dict(self): 2168 # """Return an appropriate dictionary of values for building 2169 # this File.""" 2170 # return {'Dir' : self.Dir, 2171 # 'File' : self.File, 2172 # 'RDirs' : self.RDirs} 2173
2174 - def _morph(self):
2175 """Turn a file system node into a File object.""" 2176 self.scanner_paths = {} 2177 if not hasattr(self, '_local'): 2178 self._local = 0 2179 2180 # If there was already a Builder set on this entry, then 2181 # we need to make sure we call the target-decider function, 2182 # not the source-decider. Reaching in and doing this by hand 2183 # is a little bogus. We'd prefer to handle this by adding 2184 # an Entry.builder_set() method that disambiguates like the 2185 # other methods, but that starts running into problems with the 2186 # fragile way we initialize Dir Nodes with their Mkdir builders, 2187 # yet still allow them to be overridden by the user. Since it's 2188 # not clear right now how to fix that, stick with what works 2189 # until it becomes clear... 2190 if self.has_builder(): 2191 self.changed_since_last_build = self.decide_target
2192
2193 - def scanner_key(self):
2194 return self.get_suffix()
2195
2196 - def get_contents(self):
2197 if not self.rexists(): 2198 return '' 2199 fname = self.rfile().abspath 2200 try: 2201 r = open(fname, "rb").read() 2202 except EnvironmentError, e: 2203 if not e.filename: 2204 e.filename = fname 2205 raise 2206 return r
2207 2208 memoizer_counters.append(SCons.Memoize.CountValue('get_size')) 2209
2210 - def get_size(self):
2211 try: 2212 return self._memo['get_size'] 2213 except KeyError: 2214 pass 2215 2216 if self.rexists(): 2217 size = self.rfile().getsize() 2218 else: 2219 size = 0 2220 2221 self._memo['get_size'] = size 2222 2223 return size
2224 2225 memoizer_counters.append(SCons.Memoize.CountValue('get_timestamp')) 2226
2227 - def get_timestamp(self):
2228 try: 2229 return self._memo['get_timestamp'] 2230 except KeyError: 2231 pass 2232 2233 if self.rexists(): 2234 timestamp = self.rfile().getmtime() 2235 else: 2236 timestamp = 0 2237 2238 self._memo['get_timestamp'] = timestamp 2239 2240 return timestamp
2241
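get_size() and get_timestamp() above both follow the module's standard memoization idiom: look in self._memo, compute on a miss, store, return. A stripped-down sketch of the same pattern outside of SCons (the class and attribute names here are illustrative):

    import os.path

    class MemoizedStat:
        """Illustrative only: mirrors the self._memo caching used by File Nodes."""
        def __init__(self, path):
            self.path = path
            self._memo = {}

        def get_size(self):
            try:
                return self._memo['get_size']    # cache hit
            except KeyError:
                pass
            if os.path.exists(self.path):
                size = os.path.getsize(self.path)
            else:
                size = 0
            self._memo['get_size'] = size        # cache for the next call
            return size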
2242 - def store_info(self):
2243          # Merge our build information into the already-stored entry.
2244          # This accommodates "chained builds" where a file that's a target
2245          # in one build (SConstruct file) is a source in a different build.
2246          # See test/chained-build.py for the use case.
2247          self.dir.sconsign().store_info(self.name, self)
2248 2249 convert_copy_attrs = [ 2250 'bsources', 2251 'bimplicit', 2252 'bdepends', 2253 'bact', 2254 'bactsig', 2255 'ninfo', 2256 ] 2257 2258 2259 convert_sig_attrs = [ 2260 'bsourcesigs', 2261 'bimplicitsigs', 2262 'bdependsigs', 2263 ] 2264
2265 - def convert_old_entry(self, old_entry):
2266 # Convert a .sconsign entry from before the Big Signature 2267 # Refactoring, doing what we can to convert its information 2268 # to the new .sconsign entry format. 2269 # 2270 # The old format looked essentially like this: 2271 # 2272 # BuildInfo 2273 # .ninfo (NodeInfo) 2274 # .bsig 2275 # .csig 2276 # .timestamp 2277 # .size 2278 # .bsources 2279 # .bsourcesigs ("signature" list) 2280 # .bdepends 2281 # .bdependsigs ("signature" list) 2282 # .bimplicit 2283 # .bimplicitsigs ("signature" list) 2284 # .bact 2285 # .bactsig 2286 # 2287 # The new format looks like this: 2288 # 2289 # .ninfo (NodeInfo) 2290 # .bsig 2291 # .csig 2292 # .timestamp 2293 # .size 2294 # .binfo (BuildInfo) 2295 # .bsources 2296 # .bsourcesigs (NodeInfo list) 2297 # .bsig 2298 # .csig 2299 # .timestamp 2300 # .size 2301 # .bdepends 2302 # .bdependsigs (NodeInfo list) 2303 # .bsig 2304 # .csig 2305 # .timestamp 2306 # .size 2307 # .bimplicit 2308 # .bimplicitsigs (NodeInfo list) 2309 # .bsig 2310 # .csig 2311 # .timestamp 2312 # .size 2313 # .bact 2314 # .bactsig 2315 # 2316 # The basic idea of the new structure is that a NodeInfo always 2317 # holds all available information about the state of a given Node 2318 # at a certain point in time. The various .b*sigs lists can just 2319 # be a list of pointers to the .ninfo attributes of the different 2320 # dependent nodes, without any copying of information until it's 2321 # time to pickle it for writing out to a .sconsign file. 2322 # 2323 # The complicating issue is that the *old* format only stored one 2324 # "signature" per dependency, based on however the *last* build 2325 # was configured. We don't know from just looking at it whether 2326 # it was a build signature, a content signature, or a timestamp 2327 # "signature". Since we no longer use build signatures, the 2328 # best we can do is look at the length and if it's thirty two, 2329 # assume that it was (or might have been) a content signature. 2330 # If it was actually a build signature, then it will cause a 2331 # rebuild anyway when it doesn't match the new content signature, 2332 # but that's probably the best we can do. 2333 import SCons.SConsign 2334 new_entry = SCons.SConsign.SConsignEntry() 2335 new_entry.binfo = self.new_binfo() 2336 binfo = new_entry.binfo 2337 for attr in self.convert_copy_attrs: 2338 try: 2339 value = getattr(old_entry, attr) 2340 except AttributeError: 2341 pass 2342 else: 2343 setattr(binfo, attr, value) 2344 delattr(old_entry, attr) 2345 for attr in self.convert_sig_attrs: 2346 try: 2347 sig_list = getattr(old_entry, attr) 2348 except AttributeError: 2349 pass 2350 else: 2351 value = [] 2352 for sig in sig_list: 2353 ninfo = self.new_ninfo() 2354 if len(sig) == 32: 2355 ninfo.csig = sig 2356 else: 2357 ninfo.timestamp = sig 2358 value.append(ninfo) 2359 setattr(binfo, attr, value) 2360 delattr(old_entry, attr) 2361 return new_entry
2362 2363 memoizer_counters.append(SCons.Memoize.CountValue('get_stored_info')) 2364
2365 - def get_stored_info(self):
2366 try: 2367 return self._memo['get_stored_info'] 2368 except KeyError: 2369 pass 2370 2371 try: 2372 sconsign_entry = self.dir.sconsign().get_entry(self.name) 2373 except (KeyError, OSError): 2374 import SCons.SConsign 2375 sconsign_entry = SCons.SConsign.SConsignEntry() 2376 sconsign_entry.binfo = self.new_binfo() 2377 sconsign_entry.ninfo = self.new_ninfo() 2378 else: 2379 if isinstance(sconsign_entry, FileBuildInfo): 2380 # This is a .sconsign file from before the Big Signature 2381 # Refactoring; convert it as best we can. 2382 sconsign_entry = self.convert_old_entry(sconsign_entry) 2383 try: 2384 delattr(sconsign_entry.ninfo, 'bsig') 2385 except AttributeError: 2386 pass 2387 2388 self._memo['get_stored_info'] = sconsign_entry 2389 2390 return sconsign_entry
2391
2392 - def get_stored_implicit(self):
2393 binfo = self.get_stored_info().binfo 2394 binfo.prepare_dependencies() 2395 try: return binfo.bimplicit 2396 except AttributeError: return None
2397
2398 - def rel_path(self, other):
2399 return self.dir.rel_path(other)
2400
2401 - def _get_found_includes_key(self, env, scanner, path):
2402 return (id(env), id(scanner), path)
2403 2404 memoizer_counters.append(SCons.Memoize.CountDict('get_found_includes', _get_found_includes_key)) 2405
2406 - def get_found_includes(self, env, scanner, path):
2407 """Return the included implicit dependencies in this file. 2408 Cache results so we only scan the file once per path 2409 regardless of how many times this information is requested. 2410 """ 2411 memo_key = (id(env), id(scanner), path) 2412 try: 2413 memo_dict = self._memo['get_found_includes'] 2414 except KeyError: 2415 memo_dict = {} 2416 self._memo['get_found_includes'] = memo_dict 2417 else: 2418 try: 2419 return memo_dict[memo_key] 2420 except KeyError: 2421 pass 2422 2423 if scanner: 2424 result = scanner(self, env, path) 2425 result = map(lambda N: N.disambiguate(), result) 2426 else: 2427 result = [] 2428 2429 memo_dict[memo_key] = result 2430 2431 return result
2432
2433 - def _createDir(self):
2434 # ensure that the directories for this node are 2435 # created. 2436 self.dir._create()
2437
2438 - def retrieve_from_cache(self):
2439 """Try to retrieve the node's content from a cache 2440 2441 This method is called from multiple threads in a parallel build, 2442 so only do thread safe stuff here. Do thread unsafe stuff in 2443 built(). 2444 2445 Returns true iff the node was successfully retrieved. 2446 """ 2447 if self.nocache: 2448 return None 2449 if not self.is_derived(): 2450 return None 2451 return self.get_build_env().get_CacheDir().retrieve(self)
2452
2453 - def built(self):
2454 """ 2455 Called just after this node is successfully built. 2456 """ 2457 # Push this file out to cache before the superclass Node.built() 2458 # method has a chance to clear the build signature, which it 2459 # will do if this file has a source scanner. 2460 # 2461 # We have to clear the memoized values *before* we push it to 2462 # cache so that the memoization of the self.exists() return 2463 # value doesn't interfere. 2464 self.clear_memoized_values() 2465 if self.exists(): 2466 self.get_build_env().get_CacheDir().push(self) 2467 SCons.Node.Node.built(self)
2468
2469 - def visited(self):
2470 if self.exists(): 2471 self.get_build_env().get_CacheDir().push_if_forced(self) 2472 2473 ninfo = self.get_ninfo() 2474 2475 csig = self.get_max_drift_csig() 2476 if csig: 2477 ninfo.csig = csig 2478 2479 ninfo.timestamp = self.get_timestamp() 2480 ninfo.size = self.get_size() 2481 2482 if not self.has_builder(): 2483 # This is a source file, but it might have been a target file 2484 # in another build that included more of the DAG. Copy 2485 # any build information that's stored in the .sconsign file 2486 # into our binfo object so it doesn't get lost. 2487 old = self.get_stored_info() 2488 self.get_binfo().__dict__.update(old.binfo.__dict__) 2489 2490 self.store_info()
2491
2492 - def find_src_builder(self):
2493 if self.rexists(): 2494 return None 2495 scb = self.dir.src_builder() 2496 if scb is _null: 2497 if diskcheck_sccs(self.dir, self.name): 2498 scb = get_DefaultSCCSBuilder() 2499 elif diskcheck_rcs(self.dir, self.name): 2500 scb = get_DefaultRCSBuilder() 2501 else: 2502 scb = None 2503 if scb is not None: 2504 try: 2505 b = self.builder 2506 except AttributeError: 2507 b = None 2508 if b is None: 2509 self.builder_set(scb) 2510 return scb
2511
2512 - def has_src_builder(self):
2513 """Return whether this Node has a source builder or not. 2514 2515 If this Node doesn't have an explicit source code builder, this 2516 is where we figure out, on the fly, if there's a transparent 2517 source code builder for it. 2518 2519 Note that if we found a source builder, we also set the 2520 self.builder attribute, so that all of the methods that actually 2521 *build* this file don't have to do anything different. 2522 """ 2523 try: 2524 scb = self.sbuilder 2525 except AttributeError: 2526 scb = self.sbuilder = self.find_src_builder() 2527 return not scb is None
2528
2529 - def alter_targets(self):
2530 """Return any corresponding targets in a variant directory. 2531 """ 2532 if self.is_derived(): 2533 return [], None 2534 return self.fs.variant_dir_target_climb(self, self.dir, [self.name])
2535
2536 - def _rmv_existing(self):
2537 self.clear_memoized_values() 2538 e = Unlink(self, [], None) 2539 if isinstance(e, SCons.Errors.BuildError): 2540 raise e
2541 2542 # 2543 # Taskmaster interface subsystem 2544 # 2545
2546 - def make_ready(self):
2547 self.has_src_builder() 2548 self.get_binfo()
2549
2550 - def prepare(self):
2551 """Prepare for this file to be created.""" 2552 SCons.Node.Node.prepare(self) 2553 2554 if self.get_state() != SCons.Node.up_to_date: 2555 if self.exists(): 2556 if self.is_derived() and not self.precious: 2557 self._rmv_existing() 2558 else: 2559 try: 2560 self._createDir() 2561 except SCons.Errors.StopError, drive: 2562 desc = "No drive `%s' for target `%s'." % (drive, self) 2563 raise SCons.Errors.StopError, desc
2564 2565 # 2566 # 2567 # 2568
2569 - def remove(self):
2570 """Remove this file.""" 2571 if self.exists() or self.islink(): 2572 self.fs.unlink(self.path) 2573 return 1 2574 return None
2575
2576 - def do_duplicate(self, src):
2577 self._createDir() 2578 Unlink(self, None, None) 2579 e = Link(self, src, None) 2580 if isinstance(e, SCons.Errors.BuildError): 2581 desc = "Cannot duplicate `%s' in `%s': %s." % (src.path, self.dir.path, e.errstr) 2582 raise SCons.Errors.StopError, desc 2583 self.linked = 1 2584 # The Link() action may or may not have actually 2585 # created the file, depending on whether the -n 2586 # option was used or not. Delete the _exists and 2587 # _rexists attributes so they can be reevaluated. 2588 self.clear()
2589 2590 memoizer_counters.append(SCons.Memoize.CountValue('exists')) 2591
2592 - def exists(self):
2593 try: 2594 return self._memo['exists'] 2595 except KeyError: 2596 pass 2597 # Duplicate from source path if we are set up to do this. 2598 if self.duplicate and not self.is_derived() and not self.linked: 2599 src = self.srcnode() 2600 if not src is self: 2601 # At this point, src is meant to be copied in a variant directory. 2602 src = src.rfile() 2603 if src.abspath != self.abspath: 2604 if src.exists(): 2605 self.do_duplicate(src) 2606 # Can't return 1 here because the duplication might 2607 # not actually occur if the -n option is being used. 2608 else: 2609 # The source file does not exist. Make sure no old 2610 # copy remains in the variant directory. 2611 if Base.exists(self) or self.islink(): 2612 self.fs.unlink(self.path) 2613 # Return None explicitly because the Base.exists() call 2614 # above will have cached its value if the file existed. 2615 self._memo['exists'] = None 2616 return None 2617 result = Base.exists(self) 2618 self._memo['exists'] = result 2619 return result
2620 2621 # 2622 # SIGNATURE SUBSYSTEM 2623 # 2624
2625 - def get_max_drift_csig(self):
2626 """ 2627 Returns the content signature currently stored for this node 2628 if it's been unmodified longer than the max_drift value, or the 2629 max_drift value is 0. Returns None otherwise. 2630 """ 2631 old = self.get_stored_info() 2632 mtime = self.get_timestamp() 2633 2634 csig = None 2635 max_drift = self.fs.max_drift 2636 if max_drift > 0: 2637 if (time.time() - mtime) > max_drift: 2638 try: 2639 n = old.ninfo 2640 if n.timestamp and n.csig and n.timestamp == mtime: 2641 csig = n.csig 2642 except AttributeError: 2643 pass 2644 elif max_drift == 0: 2645 try: 2646 csig = old.ninfo.csig 2647 except AttributeError: 2648 pass 2649 2650 return csig
2651
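In practice max_drift is usually set from an SConscript or the command line rather than on the FS object directly; a hedged configuration sketch (option name as documented for SCons of this vintage):

    # SConstruct sketch: with max_drift 0, stored content signatures are
    # trusted immediately; a very large value forces files to be re-hashed.
    SetOption('max_drift', 0)
    # equivalent command line:  scons --max-drift=0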
2652 - def get_csig(self):
2653 """ 2654 Generate a node's content signature, the digested signature 2655 of its content. 2656 2657 node - the node 2658 cache - alternate node to use for the signature cache 2659 returns - the content signature 2660 """ 2661 ninfo = self.get_ninfo() 2662 try: 2663 return ninfo.csig 2664 except AttributeError: 2665 pass 2666 2667 csig = self.get_max_drift_csig() 2668 if csig is None: 2669 2670 try: 2671 contents = self.get_contents() 2672 except IOError: 2673 # This can happen if there's actually a directory on-disk, 2674 # which can be the case if they've disabled disk checks, 2675 # or if an action with a File target actually happens to 2676 # create a same-named directory by mistake. 2677 csig = '' 2678 else: 2679 csig = SCons.Util.MD5signature(contents) 2680 2681 ninfo.csig = csig 2682 2683 return csig
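The content signature computed above is simply an MD5 digest of the file's bytes; a standalone sketch of the equivalent calculation using hashlib directly instead of SCons.Util.MD5signature (and without the caching and max_drift layers):

    import hashlib

    def content_signature(fname):
        # Roughly what File.get_csig() boils down to for an existing file.
        contents = open(fname, 'rb').read()
        return hashlib.md5(contents).hexdigest()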
2684 2685 # 2686 # DECISION SUBSYSTEM 2687 # 2688
2689 - def builder_set(self, builder):
2690 SCons.Node.Node.builder_set(self, builder) 2691 self.changed_since_last_build = self.decide_target
2692
2693 - def changed_content(self, target, prev_ni):
2694 cur_csig = self.get_csig() 2695 try: 2696 return cur_csig != prev_ni.csig 2697 except AttributeError: 2698 return 1
2699
2700 - def changed_state(self, target, prev_ni):
2701 return (self.state != SCons.Node.up_to_date)
2702
2703 - def changed_timestamp_then_content(self, target, prev_ni):
2704 if not self.changed_timestamp_match(target, prev_ni): 2705 try: 2706 self.get_ninfo().csig = prev_ni.csig 2707 except AttributeError: 2708 pass 2709 return False 2710 return self.changed_content(target, prev_ni)
2711
2712 - def changed_timestamp_newer(self, target, prev_ni):
2713 try: 2714 return self.get_timestamp() > target.get_timestamp() 2715 except AttributeError: 2716 return 1
2717
2718 - def changed_timestamp_match(self, target, prev_ni):
2719 try: 2720 return self.get_timestamp() != prev_ni.timestamp 2721 except AttributeError: 2722 return 1
2723
2724 - def decide_source(self, target, prev_ni):
2725 return target.get_build_env().decide_source(self, target, prev_ni)
2726
2727 - def decide_target(self, target, prev_ni):
2728 return target.get_build_env().decide_target(self, target, prev_ni)
2729 2730 # Initialize this Node's decider function to decide_source() because 2731 # every file is a source file until it has a Builder attached... 2732 changed_since_last_build = decide_source 2733
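The decide_source()/decide_target() indirection above is what lets a construction environment's Decider() setting select among the changed_*() implementations; a hedged SConstruct-level sketch (Decider() is the public knob in SCons releases of this era):

    # SConstruct sketch
    env = Environment()
    # Check timestamps first and fall back to MD5 content signatures only
    # when the timestamp differs; see changed_timestamp_then_content() above.
    env.Decider('MD5-timestamp')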
2734 - def is_up_to_date(self):
2735 T = 0 2736 if T: Trace('is_up_to_date(%s):' % self) 2737 if not self.exists(): 2738 if T: Trace(' not self.exists():') 2739 # The file doesn't exist locally... 2740 r = self.rfile() 2741 if r != self: 2742 # ...but there is one in a Repository... 2743 if not self.changed(r): 2744 if T: Trace(' changed(%s):' % r) 2745 # ...and it's even up-to-date... 2746 if self._local: 2747 # ...and they'd like a local copy. 2748 e = LocalCopy(self, r, None) 2749 if isinstance(e, SCons.Errors.BuildError): 2750 raise 2751 self.store_info() 2752 if T: Trace(' 1\n') 2753 return 1 2754 self.changed() 2755 if T: Trace(' None\n') 2756 return None 2757 else: 2758 r = self.changed() 2759 if T: Trace(' self.exists(): %s\n' % r) 2760 return not r
2761 2762 memoizer_counters.append(SCons.Memoize.CountValue('rfile')) 2763
2764 - def rfile(self):
2765 try: 2766 return self._memo['rfile'] 2767 except KeyError: 2768 pass 2769 result = self 2770 if not self.exists(): 2771 norm_name = _my_normcase(self.name) 2772 for dir in self.dir.get_all_rdirs(): 2773 try: node = dir.entries[norm_name] 2774 except KeyError: node = dir.file_on_disk(self.name) 2775 if node and node.exists() and \ 2776 (isinstance(node, File) or isinstance(node, Entry) \ 2777 or not node.is_derived()): 2778 result = node 2779 break 2780 self._memo['rfile'] = result 2781 return result
2782
2783 - def rstr(self):
2784 return str(self.rfile())
2785
2786 - def get_cachedir_csig(self):
2787 """ 2788 Fetch a Node's content signature for purposes of computing 2789 another Node's cachesig. 2790 2791 This is a wrapper around the normal get_csig() method that handles 2792 the somewhat obscure case of using CacheDir with the -n option. 2793 Any files that don't exist would normally be "built" by fetching 2794 them from the cache, but the normal get_csig() method will try 2795 to open up the local file, which doesn't exist because the -n 2796 option meant we didn't actually pull the file from cachedir. 2797 But since the file *does* actually exist in the cachedir, we 2798 can use its contents for the csig. 2799 """ 2800 try: 2801 return self.cachedir_csig 2802 except AttributeError: 2803 pass 2804 2805 cachedir, cachefile = self.get_build_env().get_CacheDir().cachepath(self) 2806 if not self.exists() and cachefile and os.path.exists(cachefile): 2807 contents = open(cachefile, 'rb').read() 2808 self.cachedir_csig = SCons.Util.MD5signature(contents) 2809 else: 2810 self.cachedir_csig = self.get_csig() 2811 return self.cachedir_csig
2812
2813 - def get_cachedir_bsig(self):
2814 try: 2815 return self.cachesig 2816 except AttributeError: 2817 pass 2818 2819 # Add the path to the cache signature, because multiple 2820 # targets built by the same action will all have the same 2821 # build signature, and we have to differentiate them somehow. 2822 children = self.children() 2823 sigs = map(lambda n: n.get_cachedir_csig(), children) 2824 executor = self.get_executor() 2825 sigs.append(SCons.Util.MD5signature(executor.get_contents())) 2826 sigs.append(self.path) 2827 self.cachesig = SCons.Util.MD5collect(sigs) 2828 return self.cachesig
2829 2830 default_fs = None 2831
2832 -def get_default_fs():
2833 global default_fs 2834 if not default_fs: 2835 default_fs = FS() 2836 return default_fs
2837
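A small, hedged usage sketch for the default FS singleton (the file name is hypothetical):

    import SCons.Node.FS

    fs = SCons.Node.FS.get_default_fs()       # created lazily on first use
    node = fs.File('build/output.o')          # the canonical Node for that path
    assert fs is SCons.Node.FS.get_default_fs()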
2838 -class FileFinder:
2839 """ 2840 """ 2841 if SCons.Memoize.use_memoizer: 2842 __metaclass__ = SCons.Memoize.Memoized_Metaclass 2843 2844 memoizer_counters = [] 2845
2846 - def __init__(self):
2847 self._memo = {}
2848
2849 - def filedir_lookup(self, p, fd=None):
2850          """
2851          A helper method for find_file() that looks up a directory for
2852          a file we're trying to find. This only creates the Dir Node if
2853          it exists on-disk, since if the directory doesn't exist we know
2854          we won't find any files in it... :-)
2855 
2856          It would be more compact to just use this as a nested function
2857          with a default keyword argument (see the commented-out version
2858          below), but that doesn't work unless you have nested scopes,
2859          so we define it here just so this works under Python 1.5.2.
2860          """
2861          if fd is None:
2862              fd = self.default_filedir
2863          dir, name = os.path.split(fd)
2864          drive, d = os.path.splitdrive(dir)
2865          if d in ('/', os.sep):
2866              return p.fs.get_root(drive).dir_on_disk(name)
2867          if dir:
2868              p = self.filedir_lookup(p, dir)
2869              if not p:
2870                  return None
2871          norm_name = _my_normcase(name)
2872          try:
2873              node = p.entries[norm_name]
2874          except KeyError:
2875              return p.dir_on_disk(name)
2876          if isinstance(node, Dir):
2877              return node
2878          if isinstance(node, Entry):
2879              node.must_be_same(Dir)
2880              return node
2881          return None
2882
2883 - def _find_file_key(self, filename, paths, verbose=None):
2884 return (filename, paths)
2885 2886 memoizer_counters.append(SCons.Memoize.CountDict('find_file', _find_file_key)) 2887
2888 - def find_file(self, filename, paths, verbose=None):
2889 """ 2890 find_file(str, [Dir()]) -> [nodes] 2891 2892 filename - a filename to find 2893 paths - a list of directory path *nodes* to search in. Can be 2894 represented as a list, a tuple, or a callable that is 2895 called with no arguments and returns the list or tuple. 2896 2897 returns - the node created from the found file. 2898 2899 Find a node corresponding to either a derived file or a file 2900 that exists already. 2901 2902 Only the first file found is returned, and none is returned 2903 if no file is found. 2904 """ 2905 memo_key = self._find_file_key(filename, paths) 2906 try: 2907 memo_dict = self._memo['find_file'] 2908 except KeyError: 2909 memo_dict = {} 2910 self._memo['find_file'] = memo_dict 2911 else: 2912 try: 2913 return memo_dict[memo_key] 2914 except KeyError: 2915 pass 2916 2917 if verbose: 2918 if not SCons.Util.is_String(verbose): 2919 verbose = "find_file" 2920 if not callable(verbose): 2921 verbose = ' %s: ' % verbose 2922 verbose = lambda s, v=verbose: sys.stdout.write(v + s) 2923 else: 2924 verbose = lambda x: x 2925 2926 filedir, filename = os.path.split(filename) 2927 if filedir: 2928 # More compact code that we can't use until we drop 2929 # support for Python 1.5.2: 2930 # 2931 #def filedir_lookup(p, fd=filedir): 2932 # """ 2933 # A helper function that looks up a directory for a file 2934 # we're trying to find. This only creates the Dir Node 2935 # if it exists on-disk, since if the directory doesn't 2936 # exist we know we won't find any files in it... :-) 2937 # """ 2938 # dir, name = os.path.split(fd) 2939 # if dir: 2940 # p = filedir_lookup(p, dir) 2941 # if not p: 2942 # return None 2943 # norm_name = _my_normcase(name) 2944 # try: 2945 # node = p.entries[norm_name] 2946 # except KeyError: 2947 # return p.dir_on_disk(name) 2948 # if isinstance(node, Dir): 2949 # return node 2950 # if isinstance(node, Entry): 2951 # node.must_be_same(Dir) 2952 # return node 2953 # if isinstance(node, Dir) or isinstance(node, Entry): 2954 # return node 2955 # return None 2956 #paths = filter(None, map(filedir_lookup, paths)) 2957 2958 self.default_filedir = filedir 2959 paths = filter(None, map(self.filedir_lookup, paths)) 2960 2961 result = None 2962 for dir in paths: 2963 verbose("looking for '%s' in '%s' ...\n" % (filename, dir)) 2964 node, d = dir.srcdir_find_file(filename) 2965 if node: 2966 verbose("... FOUND '%s' in '%s'\n" % (filename, d)) 2967 result = node 2968 break 2969 2970 memo_dict[memo_key] = result 2971 2972 return result
2973 2974 find_file = FileFinder().find_file 2975
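A hedged example of the module-level find_file() convenience wrapper bound above (the file and directory names are illustrative):

    import SCons.Node.FS

    fs = SCons.Node.FS.get_default_fs()
    search_path = [fs.Dir('include'), fs.Dir('/usr/include')]
    header = SCons.Node.FS.find_file('config.h', search_path)
    if header:
        print "found", header.abspath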