⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 coverage.py

📁 SQLAlchemy. 经典的Python ORM框架。学习必看。
💻 PY
📖 第 1 页 / 共 3 页
字号:
            for j in ['annotate', 'report', 'collect']:                if settings.get(i) and settings.get(j):                    help_fn("You can't specify the '%s' and '%s' "                              "options at the same time." % (i, j))        args_needed = (settings.get('execute')                       or settings.get('annotate')                       or settings.get('report'))        action = (settings.get('erase')                   or settings.get('collect')                  or args_needed)        if not action:            help_fn("You must specify at least one of -e, -x, -c, -r, or -a.")        if not args_needed and args:            help_fn("Unexpected arguments: %s" % " ".join(args))                self.parallel_mode = settings.get('parallel-mode')        self.get_ready()        if settings.get('erase'):            self.erase()        if settings.get('execute'):            if not args:                help_fn("Nothing to do.")            sys.argv = args            self.start()            import __main__            sys.path[0] = os.path.dirname(sys.argv[0])            execfile(sys.argv[0], __main__.__dict__)        if settings.get('collect'):            self.collect()        if not args:            args = self.cexecuted.keys()                ignore_errors = settings.get('ignore-errors')        show_missing = settings.get('show-missing')        directory = settings.get('directory=')        omit = settings.get('omit=')        if omit is not None:            omit = omit.split(',')        else:            omit = []        if settings.get('report'):            self.report(args, show_missing, ignore_errors, omit_prefixes=omit)        if settings.get('annotate'):            self.annotate(args, directory, ignore_errors, omit_prefixes=omit)    def use_cache(self, usecache, cache_file=None):        self.usecache = usecache        if cache_file and not self.cache:            self.cache_default = cache_file            def get_ready(self, parallel_mode=False):        if self.usecache and not self.cache:            self.cache = os.environ.get(self.cache_env, self.cache_default)            if self.parallel_mode:                self.cache += "." + gethostname() + "." + str(os.getpid())            self.restore()        self.analysis_cache = {}            def start(self, parallel_mode=False):        self.get_ready()        if self.nesting == 0:                               #pragma: no cover            sys.settrace(self.t)            if hasattr(threading, 'settrace'):                threading.settrace(self.t)        self.nesting += 1            def stop(self):        self.nesting -= 1        if self.nesting == 0:                               #pragma: no cover            sys.settrace(None)            if hasattr(threading, 'settrace'):                threading.settrace(None)    def erase(self):        self.get_ready()        self.c = {}        self.analysis_cache = {}        self.cexecuted = {}        if self.cache and os.path.exists(self.cache):            os.remove(self.cache)    def exclude(self, re):        if self.exclude_re:            self.exclude_re += "|"        self.exclude_re += "(" + re + ")"    def begin_recursive(self):        self.cstack.append(self.c)        self.xstack.append(self.exclude_re)            def end_recursive(self):        self.c = self.cstack.pop()        self.exclude_re = self.xstack.pop()    # save().  Save coverage data to the coverage cache.    def save(self):        if self.usecache and self.cache:            self.canonicalize_filenames()            cache = open(self.cache, 'wb')            import marshal            marshal.dump(self.cexecuted, cache)            cache.close()    # restore().  Restore coverage data from the coverage cache (if it exists).    def restore(self):        self.c = {}        self.cexecuted = {}        assert self.usecache        if os.path.exists(self.cache):            self.cexecuted = self.restore_file(self.cache)    def restore_file(self, file_name):        try:            cache = open(file_name, 'rb')            import marshal            cexecuted = marshal.load(cache)            cache.close()            if isinstance(cexecuted, types.DictType):                return cexecuted            else:                return {}        except:            return {}    # collect(). Collect data in multiple files produced by parallel mode    def collect(self):        cache_dir, local = os.path.split(self.cache)        for f in os.listdir(cache_dir or '.'):            if not f.startswith(local):                continue            full_path = os.path.join(cache_dir, f)            cexecuted = self.restore_file(full_path)            self.merge_data(cexecuted)    def merge_data(self, new_data):        for file_name, file_data in new_data.items():            if self.cexecuted.has_key(file_name):                self.merge_file_data(self.cexecuted[file_name], file_data)            else:                self.cexecuted[file_name] = file_data    def merge_file_data(self, cache_data, new_data):        for line_number in new_data.keys():            if not cache_data.has_key(line_number):                cache_data[line_number] = new_data[line_number]    # canonical_filename(filename).  Return a canonical filename for the    # file (that is, an absolute path with no redundant components and    # normalized case).  See [GDR 2001-12-04b, 3.3].    def canonical_filename(self, filename):        if not self.canonical_filename_cache.has_key(filename):            f = filename            if os.path.isabs(f) and not os.path.exists(f):                f = os.path.basename(f)            if not os.path.isabs(f):                for path in [os.curdir] + sys.path:                    g = os.path.join(path, f)                    if os.path.exists(g):                        f = g                        break            cf = os.path.normcase(os.path.abspath(f))            self.canonical_filename_cache[filename] = cf        return self.canonical_filename_cache[filename]    # canonicalize_filenames().  Copy results from "c" to "cexecuted",     # canonicalizing filenames on the way.  Clear the "c" map.    def canonicalize_filenames(self):        for filename, lineno in self.c.keys():            if filename == '<string>':                # Can't do anything useful with exec'd strings, so skip them.                continue            f = self.canonical_filename(filename)            if not self.cexecuted.has_key(f):                self.cexecuted[f] = {}            self.cexecuted[f][lineno] = 1        self.c = {}    # morf_filename(morf).  Return the filename for a module or file.    def morf_filename(self, morf):        if isinstance(morf, types.ModuleType):            if not hasattr(morf, '__file__'):                raise CoverageException, "Module has no __file__ attribute."            f = morf.__file__        else:            f = morf        return self.canonical_filename(f)    # analyze_morf(morf).  Analyze the module or filename passed as    # the argument.  If the source code can't be found, raise an error.    # Otherwise, return a tuple of (1) the canonical filename of the    # source code for the module, (2) a list of lines of statements    # in the source code, (3) a list of lines of excluded statements,    # and (4), a map of line numbers to multi-line line number ranges, for    # statements that cross lines.        def analyze_morf(self, morf):        if self.analysis_cache.has_key(morf):            return self.analysis_cache[morf]        filename = self.morf_filename(morf)        ext = os.path.splitext(filename)[1]        if ext == '.pyc':            if not os.path.exists(filename[0:-1]):                raise CoverageException, ("No source for compiled code '%s'."                                   % filename)            filename = filename[0:-1]        elif ext != '.py':            raise CoverageException, "File '%s' not Python source." % filename        source = open(filename, 'r')        lines, excluded_lines, line_map = self.find_executable_statements(            source.read(), exclude=self.exclude_re            )        source.close()        result = filename, lines, excluded_lines, line_map        self.analysis_cache[morf] = result        return result    def first_line_of_tree(self, tree):        while True:            if len(tree) == 3 and type(tree[2]) == type(1):                return tree[2]            tree = tree[1]        def last_line_of_tree(self, tree):        while True:            if len(tree) == 3 and type(tree[2]) == type(1):                return tree[2]            tree = tree[-1]        def find_docstring_pass_pair(self, tree, spots):        for i in range(1, len(tree)):            if self.is_string_constant(tree[i]) and self.is_pass_stmt(tree[i+1]):                first_line = self.first_line_of_tree(tree[i])                last_line = self.last_line_of_tree(tree[i+1])                self.record_multiline(spots, first_line, last_line)            def is_string_constant(self, tree):        try:            return tree[0] == symbol.stmt and tree[1][1][1][0] == symbol.expr_stmt        except:            return False            def is_pass_stmt(self, tree):        try:            return tree[0] == symbol.stmt and tree[1][1][1][0] == symbol.pass_stmt        except:            return False    def record_multiline(self, spots, i, j):        for l in range(i, j+1):            spots[l] = (i, j)                def get_suite_spots(self, tree, spots):        """ Analyze a parse tree to find suite introducers which span a number            of lines.        """        for i in range(1, len(tree)):            if type(tree[i]) == type(()):                if tree[i][0] == symbol.suite:                    # Found a suite, look back for the colon and keyword.                    lineno_colon = lineno_word = None                    for j in range(i-1, 0, -1):                        if tree[j][0] == token.COLON:                            # Colons are never executed themselves: we want the                            # line number of the last token before the colon.                            lineno_colon = self.last_line_of_tree(tree[j-1])                        elif tree[j][0] == token.NAME:                            if tree[j][1] == 'elif':                                # Find the line number of the first non-terminal                                # after the keyword.                                t = tree[j+1]                                while t and token.ISNONTERMINAL(t[0]):                                    t = t[1]                                if t:                                    lineno_word = t[2]                            else:                                lineno_word = tree[j][2]                            break                        elif tree[j][0] == symbol.except_clause:                            # "except" clauses look like:                            # ('except_clause', ('NAME', 'except', lineno), ...)                            if tree[j][1][0] == token.NAME:                                lineno_word = tree[j][1][2]                                break                    if lineno_colon and lineno_word:                        # Found colon and keyword, mark all the lines                        # between the two with the two line numbers.                        self.record_multiline(spots, lineno_word, lineno_colon)                    # "pass" statements are tricky: different versions of Python                    # treat them differently, especially in the common case of a                    # function with a doc string and a single pass statement.                    self.find_docstring_pass_pair(tree[i], spots)                                    elif tree[i][0] == symbol.simple_stmt:                    first_line = self.first_line_of_tree(tree[i])                    last_line = self.last_line_of_tree(tree[i])                    if first_line != last_line:                        self.record_multiline(spots, first_line, last_line)                self.get_suite_spots(tree[i], spots)    def find_executable_statements(self, text, exclude=None):        # Find lines which match an exclusion pattern.        excluded = {}        suite_spots = {}        if exclude:            reExclude = re.compile(exclude)            lines = text.split('\n')            for i in range(len(lines)):                if reExclude.search(lines[i]):                    excluded[i+1] = 1        # Parse the code and analyze the parse tree to find out which statements        # are multiline, and where suites begin and end.        import parser        tree = parser.suite(text+'\n\n').totuple(1)        self.get_suite_spots(tree, suite_spots)        #print "Suite spots:", suite_spots                # Use the compiler module to parse the text and find the executable        # statements.  We add newlines to be impervious to final partial lines.        statements = {}        ast = compiler.parse(text+'\n\n')        visitor = StatementFindingAstVisitor(statements, excluded, suite_spots)        compiler.walk(ast, visitor, walker=visitor)        lines = statements.keys()        lines.sort()        excluded_lines = excluded.keys()        excluded_lines.sort()        return lines, excluded_lines, suite_spots    # format_lines(statements, lines).  Format a list of line numbers    # for printing by coalescing groups of lines as long as the lines    # represent consecutive statements.  This will coalesce even if    # there are gaps between statements, so if statements =    # [1,2,3,4,5,10,11,12,13,14] and lines = [1,2,5,10,11,13,14] then    # format_lines will return "1-2, 5-11, 13-14".    def format_lines(self, statements, lines):        pairs = []        i = 0        j = 0        start = None        pairs = []        while i < len(statements) and j < len(lines):            if statements[i] == lines[j]:                if start == None:                    start = lines[j]                end = lines[j]                j = j + 1            elif start:                pairs.append((start, end))                start = None            i = i + 1        if start:            pairs.append((start, end))        def stringify(pair):

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -