gen_base.py
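# This excerpt begins mid-method, inside a Java target's loop over
# (src, reldir) source pairs, so src, reldir, graph, and the DT_*/LT_*
# dependency-type constants are defined by the omitted surrounding code.
# The module-level imports this excerpt relies on are:
#
#   import os, re, string, glob, fileinput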
      if src[-5:] == '.java':
        objname = src[:-5] + self.objext

        # As .class files are likely not generated into the same
        # directory as the source files, the object path may need
        # adjustment. To this effect, take "target_ob.classes" into
        # account.
        dirs = build_path_split(objname)
        sourcedirs = dirs[:-1]  # Last element is the .class file name.
        while sourcedirs:
          if sourcedirs.pop() in self.packages:
            sourcepath = apply(build_path_join, sourcedirs)
            objname = apply(build_path_join,
                            [self.classes] + dirs[len(sourcedirs):])
            break
        else:
          raise GenError('Unable to find Java package root in path "%s"'
                         % objname)
      else:
        raise GenError('ERROR: unknown file extension on "' + src + '"')

      ofile = ObjectFile(objname, self.compile_cmd)
      sfile = SourceFile(src, reldir)
      sfile.sourcepath = sourcepath

      # object depends upon source
      graph.add(DT_OBJECT, ofile, sfile)
      self.deps.append(sfile)

      # target (a linked item) depends upon object
      graph.add(DT_LINK, self.name, ofile)

    # collect all the paths where stuff might get built
    ### we should collect this from the dependency nodes rather than
    ### the sources. "what dir are you going to put yourself into?"
    graph.add(DT_LIST, LT_TARGET_DIRS, self.path)
    graph.add(DT_LIST, LT_TARGET_DIRS, self.classes)

    for pattern in string.split(self.sources):
      dirname = build_path_dirname(pattern)
      if dirname:
        graph.add(DT_LIST, LT_TARGET_DIRS,
                  build_path_join(self.path, dirname))

    graph.add(DT_INSTALL, self.name, self)
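# Illustrative trace (hypothetical values): assuming 'org' appears in
# self.packages and objname is
# 'subversion/bindings/java/org/tigris/subversion/Foo.class', the while
# loop pops trailing components until it pops 'org', leaving
#
#   sourcepath = 'subversion/bindings/java'
#   objname    = self.classes + '/org/tigris/subversion/Foo.class'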
_build_types = {
  'exe' : TargetExe,
  'script' : TargetScript,
  'lib' : TargetLib,
  'doc' : TargetDoc,
  'swig' : TargetSWIG,
  'project' : TargetProject,
  'swig_runtime' : TargetSWIGRuntime,
  'swig_lib' : TargetSWIGLib,
  'swig_project' : TargetSWIGProject,
  'ra-module': TargetRaModule,
  'fs-module': TargetFsModule,
  'apache-mod': TargetApacheMod,
  'javah' : TargetJavaHeaders,
  'java' : TargetJavaClasses,
  'i18n' : TargetI18N,
  }
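# A minimal sketch (not part of this file) of how such a table is
# typically consumed: map a build.conf type string onto a Target
# subclass. The constructor arguments shown are illustrative only.
#
#   try:
#     target_class = _build_types[type_string]
#   except KeyError:
#     raise GenError('ERROR: unknown build type: ' + type_string)
#   target = target_class(name, options)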
class Config:
  pass

class GenError(Exception):
  pass
_predef_sections = [
  'options',
  'static-apache',
  'test-scripts',
  'bdb-test-scripts',
  'swig-dirs',
  ]
def _filter_sections(t):
  """Sort list of section names and remove predefined sections"""
  t = t[:]
  for s in _predef_sections:
    if s in t:
      t.remove(s)
  t.sort()
  return t
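# Example (hypothetical section names): predefined sections are dropped
# and the rest come back sorted.
#
#   _filter_sections(['swig-dirs', 'libsvn_fs', 'options', 'libsvn_ra'])
#   # -> ['libsvn_fs', 'libsvn_ra']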
# Path Handling Functions
#
# Build paths specified in build.conf are assumed to always use forward
# slashes as separators, regardless of the operating system the generator
# is running on.
#
# Native paths are paths separated by os.sep.
def native_path(path):
  """Convert a build path to a native path"""
  return string.replace(path, '/', os.sep)

def build_path(path):
  """Convert a native path to a build path"""
  path = string.replace(path, os.sep, '/')
  if os.altsep:
    path = string.replace(path, os.altsep, '/')
  return path
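# Example: on Windows (os.sep == '\\') the two conversions are inverses;
# on POSIX both are no-ops, since build and native separators agree.
#
#   native_path('subversion/libsvn_fs')    # -> 'subversion\\libsvn_fs'
#   build_path('subversion\\libsvn_fs')    # -> 'subversion/libsvn_fs'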
def build_path_join(*path_parts):
  """Join path components into a build path"""
  return string.join(path_parts, '/')

def build_path_split(path):
  """Return list of components in a build path"""
  return string.split(path, '/')
def build_path_splitfile(path):
  """Return the filename and directory portions of a file path"""
  pos = string.rfind(path, '/')
  if pos > 0:
    return path[:pos], path[pos+1:]
  elif pos == 0:
    return path[0], path[1:]
  else:
    return "", path
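# Example: the three branches handle a path with a directory part, an
# entry directly under the root, and a bare filename.
#
#   build_path_splitfile('subversion/include/svn_fs.h')
#   # -> ('subversion/include', 'svn_fs.h')
#   build_path_splitfile('/README')    # -> ('/', 'README')
#   build_path_splitfile('README')     # -> ('', 'README')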
def build_path_dirname(path):
  """Return the directory portion of a file path"""
  return build_path_splitfile(path)[0]

def build_path_basename(path):
  """Return the filename portion of a file path"""
  return build_path_splitfile(path)[1]

def build_path_retreat(path):
  "Given a relative directory, return ../ paths to retreat to the origin."
  return ".." + "/.." * string.count(path, '/')
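# Example: each '/' in the relative directory adds one more "..".
#
#   build_path_retreat('subversion/bindings/swig')    # -> '../../..'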
def build_path_strip(path, files):
  "Strip the given path from each file."
  l = len(path)
  result = [ ]
  for file in files:
    if len(file) > l and file[:l] == path and file[l] == '/':
      result.append(file[l+1:])
    else:
      result.append(file)
  return result
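# Example (hypothetical file names): only files actually under the given
# path are stripped; any others pass through unchanged.
#
#   build_path_strip('subversion', ['subversion/main.c', 'tools/x.c'])
#   # -> ['main.c', 'tools/x.c']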
def _collect_paths(pats, path=None):
  """Find files matching a space-separated list of globs

  pats (string) is the list of glob patterns

  path (string), if specified, is a path that will be prepended to each
    glob pattern before it is evaluated

  If path is None, the return value is a list of filenames; otherwise
  the return value is a list of 2-tuples. The first element in each tuple
  is a matching filename and the second element is the portion of the
  glob pattern which matched the file before its last forward slash (/)
  """
  result = [ ]
  for base_pat in string.split(pats):
    if path:
      pattern = build_path_join(path, base_pat)
    else:
      pattern = base_pat
    files = glob.glob(native_path(pattern)) or [pattern]

    if path is None:
      # just append the names to the result list
      for file in files:
        result.append(build_path(file))
    else:
      # if we have paths, then we need to record how each source is located
      # relative to the specified path
      reldir = build_path_dirname(base_pat)
      for file in files:
        result.append((build_path(file), reldir))

  return result
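# Example (hypothetical tree): with pats 'include/*.h' and path
# 'subversion', each match is paired with the directory portion of the
# pattern that produced it; a pattern with no matches passes through
# literally.
#
#   _collect_paths('include/*.h', 'subversion')
#   # -> [('subversion/include/svn_fs.h', 'include'), ...]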
def _find_includes(fname, include_deps):
  """Return list of files in include_deps included by fname"""
  hdrs = _scan_for_includes(fname, include_deps.keys())
  return _include_closure(hdrs, include_deps).keys()
def _create_include_deps(includes, prev_deps={}):
  """Find files included by a list of files

  includes (sequence of strings) is a list of files which should
  be scanned for includes

  prev_deps (dictionary) is an optional parameter which may contain
  the return value of a previous call to _create_include_deps. All
  data inside will be included in the return value of the current
  call.

  The return value is a dictionary with one entry for each file that
  was scanned (in addition to the entries from prev_deps). The key
  for an entry is the short file name of the file that was scanned,
  and the value is a 2-tuple containing the long file name and a
  dictionary of files included by that file.
  """
  shorts = map(os.path.basename, includes)

  # limit intra-header dependencies to just these headers, and what we
  # may have found before
  limit = shorts + prev_deps.keys()

  deps = prev_deps.copy()
  for inc in includes:
    short = os.path.basename(inc)
    deps[short] = (inc, _scan_for_includes(inc, limit))

  # keep recomputing closures until we see no more changes
  while 1:
    changes = 0
    for short in shorts:
      old = deps[short]
      deps[short] = (old[0], _include_closure(old[1], deps))
      if not changes:
        ok = old[1].keys()
        ok.sort()
        nk = deps[short][1].keys()
        nk.sort()
        changes = ok != nk
    if not changes:
      return deps
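# Example (hypothetical headers): the fixpoint loop lets includes chain
# through intermediaries. If svn_fs.h includes svn_error.h, and
# svn_error.h includes svn_types.h, then after convergence:
#
#   deps['svn_fs.h']
#   # -> ('subversion/include/svn_fs.h',
#   #     {'svn_error.h': None, 'svn_types.h': None})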
def _include_closure(hdrs, deps):
  """Update a set of dependencies with dependencies of dependencies

  hdrs (dictionary) is a set of dependencies. It is a dictionary with
  filenames as keys and None as values

  deps (dictionary) is a big catalog of dependencies in the format
  returned by _create_include_deps.

  Return value is a copy of the hdrs dictionary updated with new
  entries for files that the existing entries include, according
  to the information in deps.
  """
  new = hdrs.copy()
  for h in hdrs.keys():
    new.update(deps[h][1])
  return new
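# Example: a single step of expansion. If deps records that x.h includes
# y.h, the closure of {'x.h': None} gains y.h (file names hypothetical).
#
#   deps = {'x.h': ('subversion/include/x.h', {'y.h': None}),
#           'y.h': ('subversion/include/y.h', {})}
#   _include_closure({'x.h': None}, deps)
#   # -> {'x.h': None, 'y.h': None}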
_re_include = re.compile(r'^#\s*include\s*[<"]([^<"]+)[>"]')
def _scan_for_includes(fname, limit):
  """Find headers directly included by a C source file.

  fname (string) is the name of the file to scan

  limit (sequence or dictionary) is a collection of file names
  which may be included. Included files which aren't found
  in this collection will be ignored.

  Return value is a dictionary with included file names as keys and
  None as values.
  """
  # note: we don't worry about duplicates in the return value
  hdrs = { }
  for line in fileinput.input(fname):
    match = _re_include.match(line)
    if match:
      h = native_path(match.group(1))
      if h in limit:
        hdrs[h] = None
  return hdrs
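# Example: scanning a file containing '#include "svn_fs.h"' and
# '#include <apr_pools.h>', with both names present in limit, yields
# {'svn_fs.h': None, 'apr_pools.h': None}; an include absent from limit
# (e.g. <stdio.h>) is silently skipped.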
def _sorted_files(graph, area):
  "Given a list of targets, sort them based on their dependencies."

  # we're going to just go with a naive algorithm here. these lists are
  # going to be so short, that we can use O(n^2) or whatever this is.

  inst_targets = graph.get_sources(DT_INSTALL, area)

  # first we need our own copy of the target list since we're going to
  # munge it.
  targets = inst_targets[:]

  # the output list of the targets' files
  files = [ ]

  # loop while we have targets remaining:
  while targets:
    # find a target that has no dependencies in our current targets list.
    for t in targets:
      s = graph.get_sources(DT_LINK, t.name, Target) \
          + graph.get_sources(DT_NONLIB, t.name, Target)
      for d in s:
        if d in targets:
          break
      else:
        # no dependencies found in the targets list. this is a good "base"
        # to add to the files list now.
        #
        # If the filename is blank, see if there are any NONLIB dependencies
        # rather than adding a blank filename to the list.
        if not isinstance(t, TargetI18N) and not isinstance(t, TargetJava):
          files.append(t.filename)
        else:
          s = graph.get_sources(DT_NONLIB, t.name)
          for d in s:
            if d not in targets:
              files.append(d.filename)

        # don't consider this target any more
        targets.remove(t)

        # break out of search through targets
        break
    else:
      # we went through the entire target list and everything had at least
      # one dependency on another target. thus, we have a circular dependency
      # tree. somebody messed up the .conf file, or the app truly does have
      # a loop (and if so, they're screwed; libtool can't relink a lib at
      # install time if the dependent libs haven't been installed yet)
      raise CircularDependencies()

  return files
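# Example (hypothetical targets): if libsvn_fs has a DT_LINK dependency on
# libsvn_subr, then libsvn_subr is the first target found with no
# dependencies remaining in the list, so its file is emitted ahead of
# libsvn_fs's -- the order libtool needs at install time.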
class CircularDependencies(Exception):
  pass
def unique(seq):
  "Eliminate duplicates from a sequence"
  list = [ ]
  dupes = { }
  for e in seq:
    if not dupes.has_key(e):
      dupes[e] = None
      list.append(e)
  return list
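# Example: order of first occurrence is preserved.
#
#   unique(['a', 'b', 'a', 'c', 'b'])    # -> ['a', 'b', 'c']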
### End of file.