Thu, 07 Jan 2010 13:42:51 +0000
Updated coverage.py to version 3.2.
--- a/DebugClients/Python/coverage/__init__.py Thu Jan 07 13:42:05 2010 +0000
+++ b/DebugClients/Python/coverage/__init__.py Thu Jan 07 13:42:51 2010 +0000
@@ -5,12 +5,14 @@
 """
 
-__version__ = "3.0.1"    # see detailed history in CHANGES.txt
+__version__ = "3.2"      # see detailed history in CHANGES.txt
+
+__url__ = "http://nedbatchelder.com/code/coverage"
 
-from control import coverage
-from data import CoverageData
-from cmdline import main, CoverageScript
-from misc import CoverageException
+from coverage.control import coverage
+from coverage.data import CoverageData
+from coverage.cmdline import main, CoverageScript
+from coverage.misc import CoverageException
 
 # Module-level functions.  The original API to this module was based on
@@ -24,10 +26,10 @@
 
 def _singleton_method(name):
     """Return a function to the `name` method on a singleton `coverage` object.
-    
+
     The singleton object is created the first time one of these functions is
     called.
-    
+
     """
     def wrapper(*args, **kwargs):
         """Singleton wrapper around a coverage method."""
@@ -78,4 +80,4 @@
 # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 # TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
-# DAMAGE.
\ No newline at end of file
+# DAMAGE.
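The classic module-level API (coverage.start(), coverage.stop(), and so on) is built from these wrappers. A minimal sketch of the lazy-singleton pattern follows; the hunk above ends before the wrapper body, so the body shown here, including the auto_data=True default, is reconstructed from the 3.2 sources and should be read as an assumption rather than the verbatim code:

    from coverage.control import coverage

    _the_coverage = None

    def _singleton_method(name):
        # Return a function that proxies `name` onto one shared coverage
        # object, created lazily the first time any wrapper is called.
        def wrapper(*args, **kwargs):
            global _the_coverage
            if not _the_coverage:
                _the_coverage = coverage(auto_data=True)   # assumed default
            return getattr(_the_coverage, name)(*args, **kwargs)
        return wrapper

    start = _singleton_method('start')
    stop = _singleton_method('stop')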
--- a/DebugClients/Python/coverage/annotate.py Thu Jan 07 13:42:05 2010 +0000
+++ b/DebugClients/Python/coverage/annotate.py Thu Jan 07 13:42:51 2010 +0000
@@ -2,15 +2,15 @@
 
 import os, re
 
-from report import Reporter
+from coverage.report import Reporter
 
 class AnnotateReporter(Reporter):
     """Generate annotated source files showing line coverage.
-    
+
     This reporter creates annotated copies of the measured source files. Each
     .py file is copied as a .py,cover file, with a left-hand margin annotating
     each line::
-    
+
         > def h(x):
         -     if 0:   #pragma: no cover
         -         pass
@@ -18,31 +18,34 @@
         !         a = 1
         >     else:
         >         a = 2
-        
+
         > h(2)
 
     Executed lines use '>', lines not executed use '!', lines excluded from
     consideration use '-'.
-    
+
     """
 
     def __init__(self, coverage, ignore_errors=False):
         super(AnnotateReporter, self).__init__(coverage, ignore_errors)
         self.directory = None
-        
+
     blank_re = re.compile(r"\s*(#|$)")
     else_re = re.compile(r"\s*else\s*:\s*(#|$)")
 
     def report(self, morfs, directory=None, omit_prefixes=None):
         """Run the report."""
         self.report_files(self.annotate_file, morfs, directory, omit_prefixes)
-        
-    def annotate_file(self, cu, statements, excluded, missing):
+
+    def annotate_file(self, cu, analysis):
         """Annotate a single file.
-        
+
         `cu` is the CodeUnit for the file to annotate.
-        
+
         """
+        if not cu.relative:
+            return
+
         filename = cu.filename
         source = cu.source_file()
         if self.directory:
@@ -52,6 +55,10 @@
             dest_file = filename + ",cover"
         dest = open(dest_file, 'w')
 
+        statements = analysis.statements
+        missing = analysis.missing
+        excluded = analysis.excluded
+
         lineno = 0
         i = 0
         j = 0
@@ -70,7 +77,7 @@
             if self.blank_re.match(line):
                 dest.write('  ')
             elif self.else_re.match(line):
-                # Special logic for lines containing only 'else:'.  
+                # Special logic for lines containing only 'else:'.
                 if i >= len(statements) and j >= len(missing):
                     dest.write('! ')
                 elif i >= len(statements) or j >= len(missing):
@@ -87,4 +94,4 @@
                 dest.write('! ')
             dest.write(line)
         source.close()
-        dest.close()
\ No newline at end of file
+        dest.close()
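A hypothetical session producing the ",cover" files this reporter describes; "mymodule" stands in for any measured module and is not part of the diff:

    from coverage import coverage

    cov = coverage()
    cov.start()
    import mymodule                 # hypothetical module whose run is measured
    cov.stop()
    cov.annotate(morfs=[mymodule], directory="annotated")
    # annotated/ now holds mymodule.py,cover with the >, !, and - margins.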
--- a/DebugClients/Python/coverage/backward.py Thu Jan 07 13:42:05 2010 +0000
+++ b/DebugClients/Python/coverage/backward.py Thu Jan 07 13:42:51 2010 +0000
@@ -1,15 +1,17 @@
 """Add things to old Pythons so I can pretend they are newer."""
 
-# pylint: disable-msg=W0622
-# (Redefining built-in blah)
-# The whole point of this file is to redefine built-ins, so shut up about it.
+# This file does lots of tricky stuff, so disable a bunch of lintisms.
+# pylint: disable-msg=F0401,W0611,W0622
+# F0401: Unable to import blah
+# W0611: Unused import blah
+# W0622: Redefining built-in blah
 
+import os, sys
 
 # Python 2.3 doesn't have `set`
 try:
     set = set       # new in 2.4
 except NameError:
-    # (Redefining built-in 'set')
     from sets import Set as set
 
@@ -22,3 +24,49 @@
         lst = list(iterable)
         lst.sort()
         return lst
+
+# Pythons 2 and 3 differ on where to get StringIO
+
+try:
+    from cStringIO import StringIO
+    BytesIO = StringIO
+except ImportError:
+    from io import StringIO, BytesIO
+
+# What's a string called?
+
+try:
+    string_class = basestring
+except NameError:
+    string_class = str
+
+# Where do pickles come from?
+
+try:
+    import cPickle as pickle
+except ImportError:
+    import pickle
+
+# range or xrange?
+
+try:
+    range = xrange
+except NameError:
+    range = range
+
+# Exec is a statement in Py2, a function in Py3
+
+if sys.hexversion > 0x03000000:
+    def exec_function(source, filename, global_map):
+        """A wrapper around exec()."""
+        exec(compile(source, filename, "exec"), global_map)
+else:
+    # OK, this is pretty gross.  In Py2, exec was a statement, but that will
+    # be a syntax error if we try to put it in a Py3 file, even if it is never
+    # executed.  So hide it inside an evaluated string literal instead.
+    eval(compile("""\
+def exec_function(source, filename, global_map):
+    exec compile(source, filename, "exec") in global_map
+""",
+    "<exec_function>", "exec"
+    ))
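The exec_function shim gives both Python lines the same call signature. A quick hypothetical check of its behavior, not part of the diff, is:

    from coverage.backward import exec_function

    namespace = {}
    # Same call works as a function on Py3 and as a statement on Py2.
    exec_function("answer = 6 * 7", "<demo>", namespace)
    assert namespace["answer"] == 42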
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/DebugClients/Python/coverage/bytecode.py Thu Jan 07 13:42:51 2010 +0000
@@ -0,0 +1,81 @@
+"""Bytecode manipulation for coverage.py"""
+
+import opcode, sys, types
+
+class ByteCode(object):
+    """A single bytecode."""
+    def __init__(self):
+        self.offset = -1
+        self.op = -1
+        self.arg = -1
+        self.next_offset = -1
+        self.jump_to = -1
+
+
+class ByteCodes(object):
+    """Iterator over byte codes in `code`.
+
+    Returns `ByteCode` objects.
+
+    """
+    def __init__(self, code):
+        self.code = code
+        self.offset = 0
+
+    if sys.hexversion > 0x03000000:
+        def __getitem__(self, i):
+            return self.code[i]
+    else:
+        def __getitem__(self, i):
+            return ord(self.code[i])
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        if self.offset >= len(self.code):
+            raise StopIteration
+
+        bc = ByteCode()
+        bc.op = self[self.offset]
+        bc.offset = self.offset
+
+        next_offset = self.offset+1
+        if bc.op >= opcode.HAVE_ARGUMENT:
+            bc.arg = self[self.offset+1] + 256*self[self.offset+2]
+            next_offset += 2
+
+        label = -1
+        if bc.op in opcode.hasjrel:
+            label = next_offset + bc.arg
+        elif bc.op in opcode.hasjabs:
+            label = bc.arg
+        bc.jump_to = label
+
+        bc.next_offset = self.offset = next_offset
+        return bc
+
+    next = __next__     # Py2k uses an old-style non-dunder name.
+
+
+class CodeObjects(object):
+    """Iterate over all the code objects in `code`."""
+    def __init__(self, code):
+        self.stack = [code]
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        if self.stack:
+            # We're going to return the code object on the stack, but first
+            # push its children for later returning.
+            code = self.stack.pop()
+            for c in code.co_consts:
+                if isinstance(c, types.CodeType):
+                    self.stack.append(c)
+            return code
+
+        raise StopIteration
+
+    next = __next__
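A hypothetical use of the two new iterators, not part of the diff: walk every code object reachable from a function (including nested ones via co_consts) and print each instruction. Note this parser assumes the fixed-width 1- or 3-byte opcode encoding of the interpreters coverage.py 3.2 targets; CPython 3.6+ switched to 2-byte wordcode, where this arithmetic would not apply.

    import opcode
    from coverage.bytecode import ByteCodes, CodeObjects

    def sample(x):
        if x:
            return 1
        return 2

    for code in CodeObjects(sample.__code__):
        for bc in ByteCodes(code.co_code):
            # offset, mnemonic, and jump target (-1 if not a jump)
            print(code.co_name, bc.offset, opcode.opname[bc.op], bc.jump_to)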
--- a/DebugClients/Python/coverage/cmdline.py Thu Jan 07 13:42:05 2010 +0000
+++ b/DebugClients/Python/coverage/cmdline.py Thu Jan 07 13:42:51 2010 +0000
@@ -1,20 +1,522 @@
 """Command-line support for Coverage."""
 
-import getopt, sys
+import optparse, re, sys
+
+from coverage.execfile import run_python_file
+from coverage.misc import CoverageException
+
+
+class Opts(object):
+    """A namespace class for individual options we'll build parsers from."""
+
+    append = optparse.Option(
+        '-a', '--append', action='store_false', dest="erase_first",
+        help="Append coverage data to .coverage, otherwise it is started "
+            "clean with each run."
+        )
+    branch = optparse.Option(
+        '', '--branch', action='store_true',
+        help="Measure branch coverage in addition to statement coverage."
+        )
+    directory = optparse.Option(
+        '-d', '--directory', action='store',
+        metavar="DIR",
+        help="Write the output files to DIR."
+        )
+    help = optparse.Option(
+        '-h', '--help', action='store_true',
+        help="Get help on this command."
+        )
+    ignore_errors = optparse.Option(
+        '-i', '--ignore-errors', action='store_true',
+        help="Ignore errors while reading source files."
+        )
+    pylib = optparse.Option(
+        '-L', '--pylib', action='store_true',
+        help="Measure coverage even inside the Python installed library, "
+            "which isn't done by default."
+        )
+    show_missing = optparse.Option(
+        '-m', '--show-missing', action='store_true',
+        help="Show line numbers of statements in each module that weren't "
+            "executed."
+        )
+    old_omit = optparse.Option(
+        '-o', '--omit', action='store',
+        metavar="PRE1,PRE2,...",
+        help="Omit files when their filename path starts with one of these "
+            "prefixes."
+        )
+    omit = optparse.Option(
+        '', '--omit', action='store',
+        metavar="PRE1,PRE2,...",
+        help="Omit files when their filename path starts with one of these "
+            "prefixes."
+        )
+    output_xml = optparse.Option(
+        '-o', '', action='store', dest="outfile",
+        metavar="OUTFILE",
+        help="Write the XML report to this file. Defaults to 'coverage.xml'"
+        )
+    parallel_mode = optparse.Option(
+        '-p', '--parallel-mode', action='store_true',
+        help="Include the machine name and process id in the .coverage "
+            "data file name."
+        )
+    timid = optparse.Option(
+        '', '--timid', action='store_true',
+        help="Use a simpler but slower trace method.  Try this if you get "
+            "seemingly impossible results!"
+        )
+    version = optparse.Option(
+        '', '--version', action='store_true',
+        help="Display version information and exit."
+        )
+
+
+class CoverageOptionParser(optparse.OptionParser, object):
+    """Base OptionParser for coverage.
+
+    Problems don't exit the program.
+    Defaults are initialized for all options.
+
+    """
+
+    def __init__(self, *args, **kwargs):
+        super(CoverageOptionParser, self).__init__(
+            add_help_option=False, *args, **kwargs
+            )
+        self.set_defaults(
+            actions=[],
+            branch=None,
+            directory=None,
+            help=None,
+            ignore_errors=None,
+            omit=None,
+            parallel_mode=None,
+            pylib=None,
+            show_missing=None,
+            timid=None,
+            erase_first=None,
+            version=None,
+            )
+
+        self.disable_interspersed_args()
+        self.help_fn = lambda: None
+
+    class OptionParserError(Exception):
+        """Used to stop the optparse error handler ending the process."""
+        pass
+
+    def parse_args(self, args=None, options=None):
+        """Call optparse.parse_args, but return a triple:
+
+        (ok, options, args)
+
+        """
+        try:
+            options, args = \
+                super(CoverageOptionParser, self).parse_args(args, options)
+        except self.OptionParserError:
+            return False, None, None
+        return True, options, args
+
+    def error(self, msg):
+        """Override optparse.error so sys.exit doesn't get called."""
+        self.help_fn(msg)
+        raise self.OptionParserError
+
+
+class ClassicOptionParser(CoverageOptionParser):
+    """Command-line parser for coverage.py classic arguments."""
+
+    def __init__(self):
+        super(ClassicOptionParser, self).__init__()
+
+        self.add_action('-a', '--annotate', 'annotate')
+        self.add_action('-b', '--html', 'html')
+        self.add_action('-c', '--combine', 'combine')
+        self.add_action('-e', '--erase', 'erase')
+        self.add_action('-r', '--report', 'report')
+        self.add_action('-x', '--execute', 'execute')
+
+        self.add_options([
+            Opts.directory,
+            Opts.help,
+            Opts.ignore_errors,
+            Opts.pylib,
+            Opts.show_missing,
+            Opts.old_omit,
+            Opts.parallel_mode,
+            Opts.timid,
+            Opts.version,
+        ])
+
+    def add_action(self, dash, dashdash, action_code):
+        """Add a specialized option that is the action to execute."""
+        option = self.add_option(dash, dashdash, action='callback',
+            callback=self._append_action
+            )
+        option.action_code = action_code
+
+    def _append_action(self, option, opt_unused, value_unused, parser):
+        """Callback for an option that adds to the `actions` list."""
+        parser.values.actions.append(option.action_code)
+
+
+class CmdOptionParser(CoverageOptionParser):
+    """Parse one of the new-style commands for coverage.py."""
+
+    def __init__(self, action, options=None, defaults=None, usage=None,
+                cmd=None, description=None
+                ):
+        """Create an OptionParser for a coverage command.
+
+        `action` is the slug to put into `options.actions`.
+        `options` is a list of Option's for the command.
+        `defaults` is a dict of default value for options.
+        `usage` is the usage string to display in help.
+        `cmd` is the command name, if different than `action`.
+        `description` is the description of the command, for the help text.
+
+        """
+        if usage:
+            usage = "%prog " + usage
+        super(CmdOptionParser, self).__init__(
+            prog="coverage %s" % (cmd or action),
+            usage=usage,
+            description=description,
+            )
+        self.set_defaults(actions=[action], **(defaults or {}))
+        if options:
+            self.add_options(options)
+        self.cmd = cmd or action
+
+    def __eq__(self, other):
+        # A convenience equality, so that I can put strings in unit test
+        # results, and they will compare equal to objects.
+        return (other == "<CmdOptionParser:%s>" % self.cmd)
+
+
+CMDS = {
+    'annotate': CmdOptionParser("annotate",
+        [
+            Opts.directory,
+            Opts.ignore_errors,
+            Opts.omit,
+            Opts.help,
+            ],
+        usage = "[options] [modules]",
+        description = "Make annotated copies of the given files, marking "
+            "statements that are executed with > and statements that are "
+            "missed with !."
+        ),
+
+    'help': CmdOptionParser("help", [Opts.help],
+        usage = "[command]",
+        description = "Describe how to use coverage.py"
+        ),
+
+    'html': CmdOptionParser("html",
+        [
+            Opts.directory,
+            Opts.ignore_errors,
+            Opts.omit,
+            Opts.help,
+            ],
+        usage = "[options] [modules]",
+        description = "Create an HTML report of the coverage of the files.  "
+            "Each file gets its own page, with the source decorated to show "
+            "executed, excluded, and missed lines."
+        ),
+
+    'combine': CmdOptionParser("combine", [Opts.help],
+        usage = " ",
+        description = "Combine data from multiple coverage files collected "
+            "with 'run -p'.  The combined results are written to a single "
+            "file representing the union of the data."
+        ),
+
+    'debug': CmdOptionParser("debug", [Opts.help],
+        usage = "<topic>",
+        description = "Display information on the internals of coverage.py, "
+            "for diagnosing problems. "
+            "Topics are 'data' to show a summary of the collected data, "
+            "or 'sys' to show installation information."
+        ),
+
+    'erase': CmdOptionParser("erase", [Opts.help],
+        usage = " ",
+        description = "Erase previously collected coverage data."
+        ),
 
-from execfile import run_python_file
 
+    'report': CmdOptionParser("report",
+        [
+            Opts.ignore_errors,
+            Opts.omit,
+            Opts.show_missing,
+            Opts.help,
+            ],
+        usage = "[options] [modules]",
+        description = "Report coverage statistics on modules."
+        ),
+
+    'run': CmdOptionParser("execute",
+        [
+            Opts.append,
+            Opts.branch,
+            Opts.pylib,
+            Opts.parallel_mode,
+            Opts.timid,
+            Opts.help,
+            ],
+        defaults = {'erase_first': True},
+        cmd = "run",
+        usage = "[options] <pyfile> [program options]",
+        description = "Run a Python program, measuring code execution."
+        ),
+
+    'xml': CmdOptionParser("xml",
+        [
+            Opts.ignore_errors,
+            Opts.omit,
+            Opts.output_xml,
+            Opts.help,
+            ],
+        cmd = "xml",
+        defaults = {'outfile': 'coverage.xml'},
+        usage = "[options] [modules]",
+        description = "Generate an XML report of coverage results."
+        ),
+    }
+
+
+OK, ERR = 0, 1
+
+
+class CoverageScript(object):
+    """The command-line interface to Coverage."""
+
+    def __init__(self, _covpkg=None, _run_python_file=None, _help_fn=None):
+        # _covpkg is for dependency injection, so we can test this code.
+        if _covpkg:
+            self.covpkg = _covpkg
+        else:
+            import coverage
+            self.covpkg = coverage
+
+        # _run_python_file is for dependency injection also.
+        self.run_python_file = _run_python_file or run_python_file
+
+        # _help_fn is for dependency injection.
+        self.help_fn = _help_fn or self.help
+
+        self.coverage = None
+
+    def help(self, error=None, topic=None, parser=None):
+        """Display an error message, or the named topic."""
+        assert error or topic or parser
+        if error:
+            print(error)
+            print("Use 'coverage help' for help.")
+        elif parser:
+            print(parser.format_help().strip())
+        else:
+            # Parse out the topic we want from HELP_TOPICS
+            topic_list = re.split("(?m)^=+ (\w+) =+$", HELP_TOPICS)
+            topics = dict(zip(topic_list[1::2], topic_list[2::2]))
+            help_msg = topics.get(topic, '').strip()
+            if help_msg:
+                print(help_msg % self.covpkg.__dict__)
+            else:
+                print("Don't know topic %r" % topic)
+
+    def command_line(self, argv):
+        """The bulk of the command line interface to Coverage.
+
+        `argv` is the argument list to process.
+
+        Returns 0 if all is well, 1 if something went wrong.
+
+        """
+        # Collect the command-line options.
+
+        if not argv:
+            self.help_fn(topic='minimum_help')
+            return OK
+
+        # The command syntax we parse depends on the first argument.  Classic
+        # syntax always starts with an option.
+        classic = argv[0].startswith('-')
+        if classic:
+            parser = ClassicOptionParser()
+        else:
+            parser = CMDS.get(argv[0])
+            if not parser:
+                self.help_fn("Unknown command: '%s'" % argv[0])
+                return ERR
+            argv = argv[1:]
+
+        parser.help_fn = self.help_fn
+        ok, options, args = parser.parse_args(argv)
+        if not ok:
+            return ERR
+
+        # Handle help.
+        if options.help:
+            if classic:
+                self.help_fn(topic='help')
+            else:
+                self.help_fn(parser=parser)
+            return OK
 
-USAGE = r"""
-Coverage version %(__version__)s
+        if "help" in options.actions:
+            if args:
+                for a in args:
+                    parser = CMDS.get(a)
+                    if parser:
+                        self.help_fn(parser=parser)
+                    else:
+                        self.help_fn(topic=a)
+            else:
+                self.help_fn(topic='help')
+            return OK
+
+        # Handle version.
+        if options.version:
+            self.help_fn(topic='version')
+            return OK
+
+        # Check for conflicts and problems in the options.
+        for i in ['erase', 'execute']:
+            for j in ['annotate', 'html', 'report', 'combine']:
+                if (i in options.actions) and (j in options.actions):
+                    self.help_fn("You can't specify the '%s' and '%s' "
+                              "options at the same time." % (i, j))
+                    return ERR
+
+        if not options.actions:
+            self.help_fn(
+                "You must specify at least one of -e, -x, -c, -r, -a, or -b."
+                )
+            return ERR
+        args_allowed = (
+            'execute' in options.actions or
+            'annotate' in options.actions or
+            'html' in options.actions or
+            'debug' in options.actions or
+            'report' in options.actions or
+            'xml' in options.actions
+            )
+        if not args_allowed and args:
+            self.help_fn("Unexpected arguments: %s" % " ".join(args))
+            return ERR
+
+        if 'execute' in options.actions and not args:
+            self.help_fn("Nothing to do.")
+            return ERR
+
+        # Do something.
+        self.coverage = self.covpkg.coverage(
+            data_suffix = bool(options.parallel_mode),
+            cover_pylib = options.pylib,
+            timid = options.timid,
+            branch = options.branch,
+            )
+
+        if 'debug' in options.actions:
+            if not args:
+                self.help_fn("What information would you like: data, sys?")
+                return ERR
+            for info in args:
+                if info == 'sys':
+                    print("-- sys ----------------------------------------")
+                    for label, info in self.coverage.sysinfo():
+                        if isinstance(info, list):
+                            print("%15s:" % label)
+                            for e in info:
+                                print("%15s  %s" % ("", e))
+                        else:
+                            print("%15s: %s" % (label, info))
+                elif info == 'data':
+                    print("-- data ---------------------------------------")
+                    self.coverage.load()
+                    print("path: %s" % self.coverage.data.filename)
+                    print("has_arcs: %r" % self.coverage.data.has_arcs())
+                    summary = self.coverage.data.summary(fullpath=True)
+                    if summary:
+                        filenames = sorted(summary.keys())
+                        print("\n%d files:" % len(filenames))
+                        for f in filenames:
+                            print("%s: %d lines" % (f, summary[f]))
+                    else:
+                        print("No data collected")
+                else:
+                    self.help_fn("Don't know what you mean by %r" % info)
+                    return ERR
+            return OK
+
+        if 'erase' in options.actions or options.erase_first:
+            self.coverage.erase()
+        else:
+            self.coverage.load()
+
+        if 'execute' in options.actions:
+            # Run the script.
+            self.coverage.start()
+            try:
+                self.run_python_file(args[0], args)
+            finally:
+                self.coverage.stop()
+                self.coverage.save()
+
+        if 'combine' in options.actions:
+            self.coverage.combine()
+            self.coverage.save()
+
+        # Remaining actions are reporting, with some common options.
+        report_args = {
+            'morfs': args,
+            'ignore_errors': options.ignore_errors,
+            }
+
+        omit = None
+        if options.omit:
+            omit = options.omit.split(',')
+        report_args['omit_prefixes'] = omit
+
+        if 'report' in options.actions:
+            self.coverage.report(
+                show_missing=options.show_missing, **report_args)
+        if 'annotate' in options.actions:
+            self.coverage.annotate(
+                directory=options.directory, **report_args)
+        if 'html' in options.actions:
+            self.coverage.html_report(
+                directory=options.directory, **report_args)
+        if 'xml' in options.actions:
+            outfile = options.outfile
+            if outfile == '-':
+                outfile = None
+            self.coverage.xml_report(outfile=outfile, **report_args)
+
+        return OK
+
+
+HELP_TOPICS = r"""
+
+== classic ====================================================================
+Coverage.py version %(__version__)s
 Measure, collect, and report on code coverage in Python programs.
 
 Usage:
 
-coverage -x [-p] [-L] MODULE.py [ARG1 ARG2 ...]
+coverage -x [-p] [-L] [--timid] MODULE.py [ARG1 ARG2 ...]
     Execute the module, passing the given command-line arguments, collecting
     coverage data.  With the -p option, include the machine name and process
-    ID in the .coverage file name.  With -L, measure coverage even inside the
-    Python installed library, which isn't done by default.
+    id in the .coverage file name.  With -L, measure coverage even inside the
+    Python installed library, which isn't done by default.  With --timid, use a
+    simpler but slower trace method.
 
 coverage -e
     Erase collected coverage data.
@@ -46,151 +548,48 @@
     a directory listed in the omit list.
     e.g. coverage -i -r -o c:\python25,lib\enthought\traits
 
--h  Print this help.
-
 Coverage data is saved in the file .coverage by default.  Set the
 COVERAGE_FILE environment variable to save it somewhere else.
-""".strip()
-
-
-class CoverageScript:
-    """The command-line interface to Coverage."""
-    
-    def __init__(self):
-        import coverage
-        self.covpkg = coverage
-        self.coverage = None
-
-    def help(self, error=None):
-        """Display an error message, or the usage for Coverage."""
-        if error:
-            print error
-            print "Use -h for help."
-        else:
-            print USAGE % self.covpkg.__dict__
 
-    def command_line(self, argv, help_fn=None):
-        """The bulk of the command line interface to Coverage.
-        
-        `argv` is the argument list to process.
-        `help_fn` is the help function to use when something goes wrong.
-        
-        """
-        # Collect the command-line options.
-        help_fn = help_fn or self.help
-        OK, ERR = 0, 1
-        settings = {}
-        optmap = {
-            '-a': 'annotate',
-            '-b': 'html',
-            '-c': 'combine',
-            '-d:': 'directory=',
-            '-e': 'erase',
-            '-h': 'help',
-            '-i': 'ignore-errors',
-            '-L': 'pylib',
-            '-m': 'show-missing',
-            '-p': 'parallel-mode',
-            '-r': 'report',
-            '-x': 'execute',
-            '-o:': 'omit=',
-            }
-        short_opts = ''.join([o[1:] for o in optmap.keys()])
-        long_opts = optmap.values()
-        options, args = getopt.getopt(argv, short_opts, long_opts)
-        for o, a in options:
-            if optmap.has_key(o):
-                settings[optmap[o]] = True
-            elif optmap.has_key(o + ':'):
-                settings[optmap[o + ':']] = a
-            elif o[2:] in long_opts:
-                settings[o[2:]] = True
-            elif o[2:] + '=' in long_opts:
-                settings[o[2:]+'='] = a
+== help =======================================================================
+Coverage.py, version %(__version__)s
+Measure, collect, and report on code coverage in Python programs.
 
-        if settings.get('help'):
-            help_fn()
-            return OK
+usage: coverage <command> [options] [args]
 
-        # Check for conflicts and problems in the options.
-        for i in ['erase', 'execute']:
-            for j in ['annotate', 'html', 'report', 'combine']:
-                if settings.get(i) and settings.get(j):
-                    help_fn("You can't specify the '%s' and '%s' "
-                              "options at the same time." % (i, j))
-                    return ERR
-
-        args_needed = (settings.get('execute')
-                        or settings.get('annotate')
-                        or settings.get('html')
-                        or settings.get('report'))
-        action = (settings.get('erase')
-                    or settings.get('combine')
-                    or args_needed)
-        if not action:
-            help_fn(
-                "You must specify at least one of -e, -x, -c, -r, -a, or -b."
-                )
-            return ERR
-        if not args_needed and args:
-            help_fn("Unexpected arguments: %s" % " ".join(args))
-            return ERR
-        
-        # Do something.
-        self.coverage = self.covpkg.coverage(
-            data_suffix = bool(settings.get('parallel-mode')),
-            cover_pylib = settings.get('pylib')
-            )
-
-        if settings.get('erase'):
-            self.coverage.erase()
-        else:
-            self.coverage.load()
+Commands:
+    annotate    Annotate source files with execution information.
+    combine     Combine a number of data files.
+    erase       Erase previously collected coverage data.
+    help        Get help on using coverage.py.
+    html        Create an HTML report.
+    report      Report coverage stats on modules.
+    run         Run a Python program and measure code execution.
+    xml         Create an XML report of coverage results.
 
-        if settings.get('execute'):
-            if not args:
-                help_fn("Nothing to do.")
-                return ERR
-            
-            # Run the script.
-            self.coverage.start()
-            try:
-                run_python_file(args[0], args)
-            finally:
-                self.coverage.stop()
-                self.coverage.save()
-
-        if settings.get('combine'):
-            self.coverage.combine()
-            self.coverage.save()
+Use "coverage help <command>" for detailed help on any command.
+Use "coverage help classic" for help on older command syntax.
+For more information, see %(__url__)s
 
-        # Remaining actions are reporting, with some common options.
-        show_missing = settings.get('show-missing')
-        directory = settings.get('directory=')
-        report_args = {
-            'morfs': args,
-            'ignore_errors': settings.get('ignore-errors'),
-            }
+== minimum_help ===============================================================
+Code coverage for Python.  Use 'coverage help' for help.
 
-        omit = settings.get('omit=')
-        if omit:
-            omit = omit.split(',')
-            report_args['omit_prefixes'] = omit
-            
-        if settings.get('report'):
-            self.coverage.report(show_missing=show_missing, **report_args)
-        if settings.get('annotate'):
-            self.coverage.annotate(directory=directory, **report_args)
-        if settings.get('html'):
-            self.coverage.html_report(directory=directory, **report_args)
+== version ====================================================================
+Coverage.py, version %(__version__)s.  %(__url__)s
 
-        return OK
-    
+"""
+
 
 def main():
     """The main entrypoint to Coverage.
-    
+
     This is installed as the script entrypoint.
-    
+
     """
-    return CoverageScript().command_line(sys.argv[1:])
\ No newline at end of file
+    try:
+        status = CoverageScript().command_line(sys.argv[1:])
+    except CoverageException:
+        _, err, _ = sys.exc_info()
+        print(err)
+        status = ERR
+    return status
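The net effect of this rewrite is a sub-command interface alongside the still-supported classic flags. A hypothetical driver showing both syntaxes ("myscript.py" is a stand-in for any script name):

    import sys
    from coverage.cmdline import CoverageScript

    # New sub-command style, equivalent to: coverage run myscript.py arg1
    status = CoverageScript().command_line(["run", "myscript.py", "arg1"])

    # Classic style, equivalent to: coverage -x myscript.py arg1
    status = CoverageScript().command_line(["-x", "myscript.py", "arg1"])

    # Then report, e.g.: coverage report -m
    status = CoverageScript().command_line(["report", "-m"])
    sys.exit(status)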
--- a/DebugClients/Python/coverage/codeunit.py Thu Jan 07 13:42:05 2010 +0000
+++ b/DebugClients/Python/coverage/codeunit.py Thu Jan 07 13:42:51 2010 +0000
@@ -2,60 +2,67 @@
 
 import glob, os
 
+from coverage.backward import string_class, StringIO
+from coverage.misc import CoverageException
+
+
 def code_unit_factory(morfs, file_locator, omit_prefixes=None):
     """Construct a list of CodeUnits from polymorphic inputs.
-    
+
     `morfs` is a module or a filename, or a list of same.
     `file_locator` is a FileLocator that can help resolve filenames.
     `omit_prefixes` is a list of prefixes.  CodeUnits that match those
     prefixes will be omitted from the list.
-    
+
     Returns a list of CodeUnit objects.
-    
+
     """
 
     # Be sure we have a list.
     if not isinstance(morfs, (list, tuple)):
         morfs = [morfs]
-    
+
     # On Windows, the shell doesn't expand wildcards.  Do it here.
     globbed = []
     for morf in morfs:
-        if isinstance(morf, basestring) and ('?' in morf or '*' in morf):
+        if isinstance(morf, string_class) and ('?' in morf or '*' in morf):
             globbed.extend(glob.glob(morf))
         else:
             globbed.append(morf)
     morfs = globbed
 
     code_units = [CodeUnit(morf, file_locator) for morf in morfs]
-    
+
     if omit_prefixes:
+        assert not isinstance(omit_prefixes, string_class) # common mistake
        prefixes = [file_locator.abs_file(p) for p in omit_prefixes]
        filtered = []
        for cu in code_units:
            for prefix in prefixes:
-                if cu.name.startswith(prefix):
+                if cu.filename.startswith(prefix):
                    break
            else:
                filtered.append(cu)
-    
+
        code_units = filtered
 
     return code_units
 
 
-class CodeUnit:
+class CodeUnit(object):
     """Code unit: a filename or module.
-    
+
     Instance attributes:
-    
+
     `name` is a human-readable name for this code unit.
     `filename` is the os path from which we can read the source.
     `relative` is a boolean.
-    
+
     """
+
     def __init__(self, morf, file_locator):
+        self.file_locator = file_locator
+
         if hasattr(morf, '__file__'):
             f = morf.__file__
         else:
@@ -63,14 +70,14 @@
         # .pyc files should always refer to a .py instead.
         if f.endswith('.pyc'):
             f = f[:-1]
-        self.filename = file_locator.canonical_filename(f)
+        self.filename = self.file_locator.canonical_filename(f)
 
         if hasattr(morf, '__name__'):
             n = modname = morf.__name__
             self.relative = True
         else:
             n = os.path.splitext(morf)[0]
-            rel = file_locator.relative_filename(n)
+            rel = self.file_locator.relative_filename(n)
             if os.path.isabs(n):
                 self.relative = (rel != n)
             else:
@@ -83,18 +90,36 @@
     def __repr__(self):
         return "<CodeUnit name=%r filename=%r>" % (self.name, self.filename)
 
-    def __cmp__(self, other):
-        return cmp(self.name, other.name)
+    # Annoying comparison operators.  Py3k wants __lt__ etc, and Py2k needs all
+    # of them defined.
+
+    def __lt__(self, other):
+        return self.name < other.name
+
+    def __le__(self, other):
+        return self.name <= other.name
+
+    def __eq__(self, other):
+        return self.name == other.name
+
+    def __ne__(self, other):
+        return self.name != other.name
+
+    def __gt__(self, other):
+        return self.name > other.name
+
+    def __ge__(self, other):
+        return self.name >= other.name
 
     def flat_rootname(self):
         """A base for a flat filename to correspond to this code unit.
-        
+
         Useful for writing files about the code where you want all the files in
         the same directory, but need to differentiate same-named files from
         different directories.
-        
+
         For example, the file a/b/c.py might return 'a_b_c'
-        
+
         """
         if self.modname:
             return self.modname.replace('.', '_')
@@ -104,4 +129,16 @@
 
     def source_file(self):
         """Return an open file for reading the source of the code unit."""
-        return open(self.filename)
+        if os.path.exists(self.filename):
+            # A regular text file: open it.
+            return open(self.filename)
+
+        # Maybe it's in a zip file?
+        source = self.file_locator.get_zip_data(self.filename)
+        if source is not None:
+            return StringIO(source)
+
+        # Couldn't find source.
+        raise CoverageException(
+            "No source for code %r." % self.filename
+            )
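A hypothetical use of the factory, not part of the diff: module objects, filenames, and glob patterns all work as "morfs", and the new rich comparisons make the resulting CodeUnits sortable on Python 3. The "src/*.py" pattern is an illustrative path only.

    from coverage.codeunit import code_unit_factory
    from coverage.files import FileLocator

    import re as measured_module        # any imported module works as a morf

    units = code_unit_factory([measured_module, "src/*.py"], FileLocator())
    for cu in sorted(units):            # sorting uses __lt__ and friends
        print(cu.name, cu.filename, cu.flat_rootname())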
--- a/DebugClients/Python/coverage/collector.py Thu Jan 07 13:42:05 2010 +0000
+++ b/DebugClients/Python/coverage/collector.py Thu Jan 07 13:42:51 2010 +0000
@@ -4,106 +4,164 @@
 
 try:
     # Use the C extension code when we can, for speed.
-    from tracer import Tracer
+    from coverage.tracer import Tracer
 except ImportError:
     # Couldn't import the C extension, maybe it isn't built.
+    Tracer = None
 
-    class Tracer:
-        """Python implementation of the raw data tracer."""
-        def __init__(self):
-            self.data = None
-            self.should_trace = None
-            self.should_trace_cache = None
-            self.cur_filename = None
-            self.filename_stack = []
+
+class PyTracer(object):
+    """Python implementation of the raw data tracer."""
+
+    # Because of poor implementations of trace-function-manipulating tools,
+    # the Python trace function must be kept very simple.  In particular, there
+    # must be only one function ever set as the trace function, both through
+    # sys.settrace, and as the return value from the trace function.  Put
+    # another way, the trace function must always return itself.  It cannot
+    # swap in other functions, or return None to avoid tracing a particular
+    # frame.
+    #
+    # The trace manipulator that introduced this restriction is DecoratorTools,
+    # which sets a trace function, and then later restores the pre-existing one
+    # by calling sys.settrace with a function it found in the current frame.
+    #
+    # Systems that use DecoratorTools (or similar trace manipulations) must use
+    # PyTracer to get accurate results.  The command-line --timid argument is
+    # used to force the use of this tracer.
+
+    def __init__(self):
+        self.data = None
+        self.should_trace = None
+        self.should_trace_cache = None
+        self.cur_file_data = None
+        self.last_line = 0
+        self.data_stack = []
+        self.last_exc_back = None
+        self.arcs = False
+
+    def _trace(self, frame, event, arg_unused):
+        """The trace function passed to sys.settrace."""
+
+        #print "trace event: %s %r @%d" % (
+        #           event, frame.f_code.co_filename, frame.f_lineno)
+
+        if self.last_exc_back:
+            if frame == self.last_exc_back:
+                # Someone forgot a return event.
+                if self.arcs and self.cur_file_data:
+                    self.cur_file_data[(self.last_line, -1)] = None
+                self.cur_file_data, self.last_line = self.data_stack.pop()
             self.last_exc_back = None
 
-        def _global_trace(self, frame, event, arg_unused):
-            """The trace function passed to sys.settrace."""
-            if event == 'call':
-                # Entering a new function context.  Decide if we should trace
-                # in this file.
-                filename = frame.f_code.co_filename
-                tracename = self.should_trace_cache.get(filename)
-                if tracename is None:
-                    tracename = self.should_trace(filename, frame)
-                    self.should_trace_cache[filename] = tracename
-                if tracename:
-                    # We need to trace.  Push the current filename on the stack
-                    # and record the new current filename.
-                    self.filename_stack.append(self.cur_filename)
-                    self.cur_filename = tracename
-                    # Use _local_trace for tracing within this function.
-                    return self._local_trace
+        if event == 'call':
+            # Entering a new function context.  Decide if we should trace
+            # in this file.
+            self.data_stack.append((self.cur_file_data, self.last_line))
+            filename = frame.f_code.co_filename
+            tracename = self.should_trace(filename, frame)
+            if tracename:
+                if tracename not in self.data:
+                    self.data[tracename] = {}
+                self.cur_file_data = self.data[tracename]
+            else:
+                self.cur_file_data = None
+            self.last_line = -1
+        elif event == 'line':
+            # Record an executed line.
+            if self.cur_file_data is not None:
+                if self.arcs:
+                    #print "lin", self.last_line, frame.f_lineno
+                    self.cur_file_data[(self.last_line, frame.f_lineno)] = None
                 else:
-                    # No tracing in this function.
-                    return None
-            return self._global_trace
-
-        def _local_trace(self, frame, event, arg_unused):
-            """The trace function used within a function."""
-            if self.last_exc_back:
-                if frame == self.last_exc_back:
-                    # Someone forgot a return event.
-                    self.cur_filename = self.filename_stack.pop()
-                    self.last_exc_back = None
-
-            if event == 'line':
-                # Record an executed line.
-                self.data[(self.cur_filename, frame.f_lineno)] = True
-            elif event == 'return':
-                # Leaving this function, pop the filename stack.
-                self.cur_filename = self.filename_stack.pop()
-            elif event == 'exception':
-                self.last_exc_back = frame.f_back
-            return self._local_trace
-
-        def start(self):
-            """Start this Tracer."""
-            sys.settrace(self._global_trace)
-
-        def stop(self):
-            """Stop this Tracer."""
-            sys.settrace(None)
+                    #print "lin", frame.f_lineno
+                    self.cur_file_data[frame.f_lineno] = None
+            self.last_line = frame.f_lineno
+        elif event == 'return':
+            if self.arcs and self.cur_file_data:
+                self.cur_file_data[(self.last_line, -1)] = None
+            # Leaving this function, pop the filename stack.
+            self.cur_file_data, self.last_line = self.data_stack.pop()
+        elif event == 'exception':
+            #print "exc", self.last_line, frame.f_lineno
+            self.last_exc_back = frame.f_back
+        return self._trace
+
+    def start(self):
+        """Start this Tracer."""
+        sys.settrace(self._trace)
+
+    def stop(self):
+        """Stop this Tracer."""
+        sys.settrace(None)
+
+    def get_stats(self):
+        """Return a dictionary of statistics, or None."""
+        return None
 
 
-class Collector:
+class Collector(object):
     """Collects trace data.
 
-    Creates a Tracer object for each thread, since they track stack information.
-    Each Tracer points to the same shared data, contributing traced data points.
-    
+    Creates a Tracer object for each thread, since they track stack
+    information.  Each Tracer points to the same shared data, contributing
+    traced data points.
+
     When the Collector is started, it creates a Tracer for the current thread,
     and installs a function to create Tracers for each new thread started.
     When the Collector is stopped, all active Tracers are stopped.
-    
+
     Threads started while the Collector is stopped will never have Tracers
     associated with them.
-    
+
     """
-    
+
     # The stack of active Collectors.  Collectors are added here when started,
     # and popped when stopped.  Collectors on the stack are paused when not
     # the top, and resumed when they become the top again.
     _collectors = []
 
-    def __init__(self, should_trace):
+    def __init__(self, should_trace, timid, branch):
         """Create a collector.
-        
+
         `should_trace` is a function, taking a filename, and returning a
         canonicalized filename, or False depending on whether the file should
         be traced or not.
-        
+
+        If `timid` is true, then a slower simpler trace function will be
+        used.  This is important for some environments where manipulation of
+        tracing functions make the faster more sophisticated trace function not
+        operate properly.
+
+        If `branch` is true, then branches will be measured.  This involves
+        collecting data on which statements followed each other (arcs).  Use
+        `get_arc_data` to get the arc data.
+
        """
        self.should_trace = should_trace
+        self.branch = branch
        self.reset()
 
+        if timid:
+            # Being timid: use the simple Python trace function.
+            self._trace_class = PyTracer
+        else:
+            # Being fast: use the C Tracer if it is available, else the Python
+            # trace function.
+            self._trace_class = Tracer or PyTracer
+
+    def __repr__(self):
+        return "<Collector at 0x%x>" % id(self)
+
+    def tracer_name(self):
+        """Return the class name of the tracer we're using."""
+        return self._trace_class.__name__
+
     def reset(self):
         """Clear collected data, and prepare to collect more."""
-        # A dictionary with an entry for (Python source file name, line number
-        # in that file) if that line has been executed.
+        # A dictionary mapping filenames to dicts with linenumber keys,
+        # or mapping filenames to dicts with linenumber pairs as keys.
         self.data = {}
-        
+
         # A cache of the results from should_trace, the decision about whether
         # to trace execution in a file.  A dict of filename to (filename or
         # False).
@@ -114,8 +172,9 @@
 
     def _start_tracer(self):
         """Start a new Tracer object, and store it in self.tracers."""
-        tracer = Tracer()
+        tracer = self._trace_class()
         tracer.data = self.data
+        tracer.arcs = self.branch
         tracer.should_trace = self.should_trace
         tracer.should_trace_cache = self.should_trace_cache
         tracer.start()
@@ -141,6 +200,7 @@
         if self._collectors:
             self._collectors[-1].pause()
         self._collectors.append(self)
+        #print >>sys.stderr, "Started: %r" % self._collectors
         # Install the tracer on this thread.
         self._start_tracer()
         # Install our installation tracer in threading, to jump start other
@@ -149,14 +209,13 @@
 
     def stop(self):
         """Stop collecting trace information."""
+        #print >>sys.stderr, "Stopping: %r" % self._collectors
         assert self._collectors
         assert self._collectors[-1] is self
-        
-        for tracer in self.tracers:
-            tracer.stop()
+
+        self.pause()
         self.tracers = []
-        threading.settrace(None)
-        
+
         # Remove this Collector from the stack, and resume the one underneath
         # (if any).
         self._collectors.pop()
@@ -167,14 +226,48 @@
         """Pause tracing, but be prepared to `resume`."""
         for tracer in self.tracers:
             tracer.stop()
+            stats = tracer.get_stats()
+            if stats:
+                print("\nCoverage.py tracer stats:")
+                for k in sorted(stats.keys()):
+                    print("%16s: %s" % (k, stats[k]))
         threading.settrace(None)
-        
+
     def resume(self):
         """Resume tracing after a `pause`."""
         for tracer in self.tracers:
             tracer.start()
         threading.settrace(self._installation_trace)
 
-    def data_points(self):
-        """Return the (filename, lineno) pairs collected."""
-        return self.data.keys()
\ No newline at end of file
+    def get_line_data(self):
+        """Return the line data collected.
+
+        Data is { filename: { lineno: None, ...}, ...}
+
+        """
+        if self.branch:
+            # If we were measuring branches, then we have to re-build the dict
+            # to show line data.
+            line_data = {}
+            for f, arcs in self.data.items():
+                line_data[f] = ldf = {}
+                for l1, _ in arcs:
+                    if l1:
+                        ldf[l1] = None
+            return line_data
+        else:
+            return self.data
+
+    def get_arc_data(self):
+        """Return the arc data collected.
+
+        Data is { filename: { (l1, l2): None, ...}, ...}
+
+        Note that no data is collected or returned if the Collector wasn't
+        created with `branch` true.
+
+        """
+        if self.branch:
+            return self.data
+        else:
+            return {}
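The long comment above PyTracer describes a one-function trace discipline: the same callable is both the global trace function and every local trace function, so tools like DecoratorTools that save and restore trace functions always see one stable object. A stripped-down illustration of that shape (an assumption-free toy, not the real PyTracer):

    import sys

    executed = {}   # { filename: { lineno: None } }

    def _trace(frame, event, arg):
        if event == 'line':
            fdata = executed.setdefault(frame.f_code.co_filename, {})
            fdata[frame.f_lineno] = None
        return _trace          # always return itself; never None

    def demo():
        x = 1
        if x:
            x += 1

    sys.settrace(_trace)
    demo()
    sys.settrace(None)
    print(sorted(executed.items()))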
--- a/DebugClients/Python/coverage/control.py Thu Jan 07 13:42:05 2010 +0000
+++ b/DebugClients/Python/coverage/control.py Thu Jan 07 13:42:51 2010 +0000
@@ -1,63 +1,79 @@
 """Core control stuff for Coverage."""
 
-import os, socket
+import atexit, os, socket
 
-from annotate import AnnotateReporter
-from codeunit import code_unit_factory
-from collector import Collector
-from data import CoverageData
-from files import FileLocator
-from html import HtmlReporter
-from misc import format_lines, CoverageException
-from summary import SummaryReporter
+from coverage.annotate import AnnotateReporter
+from coverage.backward import string_class          # pylint: disable-msg=W0622
+from coverage.codeunit import code_unit_factory, CodeUnit
+from coverage.collector import Collector
+from coverage.data import CoverageData
+from coverage.files import FileLocator
+from coverage.html import HtmlReporter
+from coverage.results import Analysis
+from coverage.summary import SummaryReporter
+from coverage.xmlreport import XmlReporter
 
-class coverage:
+
+class coverage(object):
     """Programmatic access to Coverage.
 
     To use::
-    
+
         from coverage import coverage
-        
+
         cov = coverage()
         cov.start()
-        #.. blah blah (run your code) blah blah
+        #.. blah blah (run your code) blah blah ..
         cov.stop()
         cov.html_report(directory='covhtml')
 
     """
+
     def __init__(self, data_file=None, data_suffix=False, cover_pylib=False,
-                auto_data=False):
-        """Create a new coverage measurement context.
-        
+                auto_data=False, timid=False, branch=False):
+        """
         `data_file` is the base name of the data file to use, defaulting to
         ".coverage".  `data_suffix` is appended to `data_file` to create the
         final file name.  If `data_suffix` is simply True, then a suffix is
         created with the machine and process identity included.
-        
+
         `cover_pylib` is a boolean determining whether Python code installed
         with the Python interpreter is measured.  This includes the Python
         standard library and any packages installed with the interpreter.
-        
+
         If `auto_data` is true, then any existing data file will be read when
         coverage measurement starts, and data will be saved automatically when
         measurement stops.
-        
+
+        If `timid` is true, then a slower and simpler trace function will be
+        used.  This is important for some environments where manipulation of
+        tracing functions breaks the faster trace function.
+
+        If `branch` is true, then branch coverage will be measured in addition
+        to the usual statement coverage.
+
        """
        from coverage import __version__
-        
+
        self.cover_pylib = cover_pylib
        self.auto_data = auto_data
-        
+        self.atexit_registered = False
+
        self.exclude_re = ""
        self.exclude_list = []
-        
+
        self.file_locator = FileLocator()
-        
-        self.collector = Collector(self._should_trace)
+
+        # Timidity: for nose users, read an environment variable.  This is a
+        # cheap hack, since the rest of the command line arguments aren't
+        # recognized, but it solves some users' problems.
+        timid = timid or ('--timid' in os.environ.get('COVERAGE_OPTIONS', ''))
+        self.collector = Collector(
+            self._should_trace, timid=timid, branch=branch
+            )
 
        # Create the data file.
        if data_suffix:
-            if not isinstance(data_suffix, basestring):
+            if not isinstance(data_suffix, string_class):
                # if data_suffix=True, use .machinename.pid
                data_suffix = ".%s.%s" % (socket.gethostname(), os.getpid())
        else:
@@ -73,18 +89,25 @@
 
        # The prefix for files considered "installed with the interpreter".
        if not self.cover_pylib:
+            # Look at where the "os" module is located.  That's the indication
+            # for "installed with the interpreter".
            os_file = self.file_locator.canonical_filename(os.__file__)
            self.pylib_prefix = os.path.split(os_file)[0]
 
+        # To avoid tracing the coverage code itself, we skip anything located
+        # where we are.
        here = self.file_locator.canonical_filename(__file__)
        self.cover_prefix = os.path.split(here)[0]
 
    def _should_trace(self, filename, frame):
        """Decide whether to trace execution in `filename`
-        
+
+        This function is called from the trace function.  As each new file name
+        is encountered, this function determines whether it is traced or not.
+
        Returns a canonicalized filename if it should be traced, False if it
        should not.
-        
+
        """
        if filename == '<string>':
            # There's no point in ever tracing string executions, we can't do
@@ -119,11 +142,20 @@
 
        return canonical
 
+    # To log what should_trace returns, change this to "if 1:"
+    if 0:
+        _real_should_trace = _should_trace
+        def _should_trace(self, filename, frame):   # pylint: disable-msg=E0102
+            """A logging decorator around the real _should_trace function."""
+            ret = self._real_should_trace(filename, frame)
+            print("should_trace: %r -> %r" % (filename, ret))
+            return ret
+
    def use_cache(self, usecache):
        """Control the use of a data file (incorrectly called a cache).
-        
+
        `usecache` is true or false, whether to read and write data on disk.
-        
+
        """
        self.data.usefile(usecache)
 
@@ -131,16 +163,17 @@
        """Load previously-collected coverage data from the data file."""
        self.collector.reset()
        self.data.read()
-        
+
    def start(self):
        """Start measuring code coverage."""
        if self.auto_data:
            self.load()
            # Save coverage data when Python exits.
-            import atexit
-            atexit.register(self.save)
+            if not self.atexit_registered:
+                atexit.register(self.save)
+                self.atexit_registered = True
        self.collector.start()
-        
+
    def stop(self):
        """Stop measuring code coverage."""
        self.collector.stop()
@@ -148,10 +181,10 @@
 
    def erase(self):
        """Erase previously-collected coverage data.
-        
+
        This removes the in-memory data collected in this session as well as
        discarding the data file.
-        
+
        """
        self.collector.reset()
        self.data.erase()
@@ -163,12 +196,12 @@
 
    def exclude(self, regex):
        """Exclude source lines from execution consideration.
-        
+
        `regex` is a regular expression.  Lines matching this expression are
        not considered executable when reporting code coverage.  A list of
        regexes is maintained; this function adds a new regex to the list.
        Matching any of the regexes excludes a source line.
-        
+
        """
        self.exclude_list.append(regex)
        self.exclude_re = "(" + ")|(".join(self.exclude_list) + ")"
@@ -184,17 +217,18 @@
 
    def combine(self):
        """Combine together a number of similarly-named coverage data files.
-        
+
        All coverage data files whose name starts with `data_file` (from the
        coverage() constructor) will be read, and combined together into the
        current measurements.
-        
+
        """
        self.data.combine_parallel_data()
 
    def _harvest_data(self):
-        """Get the collected data by filename and reset the collector."""
-        self.data.add_line_data(self.collector.data_points())
+        """Get the collected data and reset the collector."""
+        self.data.add_line_data(self.collector.get_line_data())
+        self.data.add_arc_data(self.collector.get_arc_data())
        self.collector.reset()
 
    # Backward compatibility with version 1.
@@ -205,74 +239,45 @@
 
    def analysis2(self, morf):
        """Analyze a module.
-        
+
        `morf` is a module or a filename.  It will be analyzed to determine
        its coverage statistics.  The return value is a 5-tuple:
-        
+
        * The filename for the module.
        * A list of line numbers of executable statements.
        * A list of line numbers of excluded statements.
-        * A list of line numbers of statements not run (missing from execution).
+        * A list of line numbers of statements not run (missing from
+          execution).
        * A readable formatted string of the missing line numbers.
 
        The analysis uses the source file itself and the current measured
        coverage data.
 
        """
-        code_unit = code_unit_factory(morf, self.file_locator)[0]
-        st, ex, m, mf = self._analyze(code_unit)
-        return code_unit.filename, st, ex, m, mf
+        analysis = self._analyze(morf)
+        return (
+            analysis.filename, analysis.statements, analysis.excluded,
+            analysis.missing, analysis.missing_formatted()
+            )
 
-    def _analyze(self, code_unit):
-        """Analyze a single code unit.
-        
-        Returns a 4-tuple: (statements, excluded, missing, missing formatted).
+    def _analyze(self, it):
+        """Analyze a single morf or code unit.
+
+        Returns an `Analysis` object.
 
        """
-        from parser import CodeParser
-
-        filename = code_unit.filename
-        ext = os.path.splitext(filename)[1]
-        source = None
-        if ext == '.py':
-            if not os.path.exists(filename):
-                source = self.file_locator.get_zip_data(filename)
-                if not source:
-                    raise CoverageException(
-                        "No source for code '%s'." % code_unit.filename
-                        )
-
-        parser = CodeParser()
-        statements, excluded, line_map = parser.parse_source(
-            text=source, filename=filename, exclude=self.exclude_re
-            )
+        if not isinstance(it, CodeUnit):
+            it = code_unit_factory(it, self.file_locator)[0]
 
-        # Identify missing statements.
-        missing = []
-        execed = self.data.executed_lines(filename)
-        for line in statements:
-            lines = line_map.get(line)
-            if lines:
-                for l in range(lines[0], lines[1]+1):
-                    if l in execed:
-                        break
-                else:
-                    missing.append(line)
-            else:
-                if line not in execed:
-                    missing.append(line)
-
-        return (
-            statements, excluded, missing, format_lines(statements, missing)
-            )
+        return Analysis(self, it)
 
    def report(self, morfs=None, show_missing=True, ignore_errors=False,
                file=None, omit_prefixes=None):     # pylint: disable-msg=W0622
        """Write a summary report to `file`.
-        
+
        Each module in `morfs` is listed, with counts of statements, executed
        statements, missing statements, and a list of lines missed.
-        
+
        """
        reporter = SummaryReporter(self, show_missing, ignore_errors)
        reporter.report(morfs, outfile=file, omit_prefixes=omit_prefixes)
@@ -280,12 +285,12 @@
 
    def annotate(self, morfs=None, directory=None, ignore_errors=False,
                    omit_prefixes=None):
        """Annotate a list of modules.
-        
+
        Each module in `morfs` is annotated.  The source is written to a new
        file, named with a ",cover" suffix, with each line prefixed with a
        marker to indicate the coverage of the line.  Covered lines have ">",
        excluded lines have "-", and missing lines have "!".
-        
+
        """
        reporter = AnnotateReporter(self, ignore_errors)
        reporter.report(
@@ -294,8 +299,48 @@
 
    def html_report(self, morfs=None, directory=None, ignore_errors=False,
                    omit_prefixes=None):
        """Generate an HTML report.
-        
+
        """
        reporter = HtmlReporter(self, ignore_errors)
        reporter.report(
-            morfs, directory=directory, omit_prefixes=omit_prefixes)
\ No newline at end of file
+            morfs, directory=directory, omit_prefixes=omit_prefixes)
+
+    def xml_report(self, morfs=None, outfile=None, ignore_errors=False,
+                    omit_prefixes=None):
+        """Generate an XML report of coverage results.
+
+        The report is compatible with Cobertura reports.
+
+        """
+        if outfile:
+            outfile = open(outfile, "w")
+        try:
+            reporter = XmlReporter(self, ignore_errors)
+            reporter.report(
+                morfs, omit_prefixes=omit_prefixes, outfile=outfile)
+        finally:
+            outfile.close()
+
+    def sysinfo(self):
+        """Return a list of key,value pairs showing internal information."""
+
+        import coverage as covmod
+        import platform, re, sys
+
+        info = [
+            ('version', covmod.__version__),
+            ('coverage', covmod.__file__),
+            ('cover_prefix', self.cover_prefix),
+            ('pylib_prefix', self.pylib_prefix),
+            ('tracer', self.collector.tracer_name()),
+            ('data_path', self.data.filename),
+            ('python', sys.version.replace('\n', '')),
+            ('platform', platform.platform()),
+            ('cwd', os.getcwd()),
+            ('path', sys.path),
+            ('environment', [
+                ("%s = %s" % (k, v)) for k, v in os.environ.items()
+                    if re.search("^COV|^PY", k)
+                ]),
+            ]
+        return info
--- a/DebugClients/Python/coverage/data.py Thu Jan 07 13:42:05 2010 +0000
+++ b/DebugClients/Python/coverage/data.py Thu Jan 07 13:42:51 2010 +0000
@@ -1,24 +1,26 @@
 """Coverage data for Coverage."""
 
 import os
-import cPickle as pickle
 
-from backward import sorted    # pylint: disable-msg=W0622
+from coverage.backward import pickle, sorted    # pylint: disable-msg=W0622
 
-class CoverageData:
+
+class CoverageData(object):
     """Manages collected coverage data, including file storage.
-    
+
     The data file format is a pickled dict, with these keys:
-    
+
     * collector: a string identifying the collecting software
 
     * lines: a dict mapping filenames to sorted lists of line numbers
      executed:
        { 'file1': [17,23,45],  'file2': [1,2,3], ... }
-    
+
+    * arcs: a dict mapping filenames to sorted lists of line number pairs:
+        { 'file1': [(17,23), (17,25), (25,26)], ... }
+
    """
-    
+
    # Name of the data file (unless environment variable is set).
    filename_default = ".coverage"
 
@@ -27,9 +29,9 @@
 
    def __init__(self, basename=None, suffix=None, collector=None):
        """Create a CoverageData.
-        
+
        `basename` is the name of the file to use for storing data.
-        
+
        `suffix` is a suffix to append to the base file name. This can be used
        for multiple or parallel execution, so that many coverage data files
        can exist simultaneously.
@@ -37,73 +39,82 @@
        `collector` is a string describing the coverage measurement software.
 
        """
-        self.basename = basename
        self.collector = collector
-        self.suffix = suffix
-        
+
        self.use_file = True
-        self.filename = None
+
+        # Construct the filename that will be used for data file storage, if we
+        # ever do any file storage.
+        self.filename = (basename or
+                os.environ.get(self.filename_env, self.filename_default))
+        if suffix:
+            self.filename += suffix
+        self.filename = os.path.abspath(self.filename)
 
        # A map from canonical Python source file name to a dictionary in
        # which there's an entry for each line number that has been
        # executed:
        #
        #   {
-        #       'filename1.py': { 12: True, 47: True, ... },
+        #       'filename1.py': { 12: None, 47: None, ... },
        #       ...
        #       }
        #
        self.lines = {}
-        
+
+        # A map from canonical Python source file name to a dictionary with an
+        # entry for each pair of line numbers forming an arc:
+        #
+        #   { filename: { (l1,l2): None, ... }, ...}
+        #
+        self.arcs = {}
+
    def usefile(self, use_file=True):
        """Set whether or not to use a disk file for data."""
        self.use_file = use_file
 
-    def _make_filename(self):
-        """Construct the filename that will be used for data file storage."""
-        assert self.use_file
-        if not self.filename:
-            self.filename = (self.basename or
-                    os.environ.get(self.filename_env, self.filename_default))
-
-            if self.suffix:
-                self.filename += self.suffix
-
    def read(self):
        """Read coverage data from the coverage data file (if it exists)."""
-        data = {}
        if self.use_file:
-            self._make_filename()
-            data = self._read_file(self.filename)
-        self.lines = data
+            self.lines, self.arcs = self._read_file(self.filename)
+        else:
+            self.lines, self.arcs = {}, {}
 
    def write(self):
        """Write the collected coverage data to a file."""
        if self.use_file:
-            self._make_filename()
            self.write_file(self.filename)
 
    def erase(self):
        """Erase the data, both in this object, and from its file storage."""
        if self.use_file:
-            self._make_filename()
            if self.filename and os.path.exists(self.filename):
                os.remove(self.filename)
        self.lines = {}
-        
+        self.arcs = {}
+
    def line_data(self):
        """Return the map from filenames to lists of line numbers executed."""
        return dict(
-            [(f, sorted(linemap.keys())) for f, linemap in self.lines.items()]
+            [(f, sorted(lmap.keys())) for f, lmap in self.lines.items()]
+            )
+
+    def arc_data(self):
+        """Return the map from filenames to lists of line number pairs."""
+        return dict(
+            [(f, sorted(amap.keys())) for f, amap in self.arcs.items()]
            )
 
    def write_file(self, filename):
        """Write the coverage data to `filename`."""
 
-        # Create the file data.        
+        # Create the file data.
        data = {}
 
        data['lines'] = self.line_data()
+        arcs = self.arc_data()
+        if arcs:
+            data['arcs'] = arcs
 
        if self.collector:
            data['collector'] = self.collector
@@ -117,71 +128,123 @@
 
    def read_file(self, filename):
        """Read the coverage data from `filename`."""
-        self.lines = self._read_file(filename)
+        self.lines, self.arcs = self._read_file(filename)
+
+    def raw_data(self, filename):
+        """Return the raw pickled data from `filename`."""
+        fdata = open(filename, 'rb')
+        try:
+            data = pickle.load(fdata)
+        finally:
+            fdata.close()
+        return data
 
    def _read_file(self, filename):
-        """Return the stored coverage data from the given file."""
+        """Return the stored coverage data from the given file.
+
+        Returns two values, suitable for assigning to `self.lines` and
+        `self.arcs`.
+
+        """
+        lines = {}
+        arcs = {}
        try:
-            fdata = open(filename, 'rb')
-            try:
-                data = pickle.load(fdata)
-            finally:
-                fdata.close()
+            data = self.raw_data(filename)
            if isinstance(data, dict):
                # Unpack the 'lines' item.
                lines = dict([
-                    (f, dict([(l, True) for l in linenos]))
-                        for f,linenos in data['lines'].items()
+                    (f, dict.fromkeys(linenos, None))
+                        for f, linenos in data.get('lines', {}).items()
                    ])
-                return lines
-            else:
-                return {}
+                # Unpack the 'arcs' item.
+                arcs = dict([
+                    (f, dict.fromkeys(arcpairs, None))
+                        for f, arcpairs in data.get('arcs', {}).items()
+                    ])
        except Exception:
-            return {}
+            pass
+        return lines, arcs
 
    def combine_parallel_data(self):
-        """ Treat self.filename as a file prefix, and combine the data from all
-            of the files starting with that prefix.
+        """Combine a number of data files together.
+
+        Treat `self.filename` as a file prefix, and combine the data from all
+        of the data files starting with that prefix.
+
        """
-        self._make_filename()
        data_dir, local = os.path.split(self.filename)
        for f in os.listdir(data_dir or '.'):
            if f.startswith(local):
                full_path = os.path.join(data_dir, f)
-                new_data = self._read_file(full_path)
-                for filename, file_data in new_data.items():
+                new_lines, new_arcs = self._read_file(full_path)
+                for filename, file_data in new_lines.items():
                    self.lines.setdefault(filename, {}).update(file_data)
+                for filename, file_data in new_arcs.items():
+                    self.arcs.setdefault(filename, {}).update(file_data)
 
-    def add_line_data(self, data_points):
+    def add_line_data(self, line_data):
        """Add executed line data.
-        
-        `data_points` is (filename, lineno) pairs.
-        
+
+        `line_data` is { filename: { lineno: None, ... }, ...}
+
        """
-        for filename, lineno in data_points:
-            self.lines.setdefault(filename, {})[lineno] = True
+        for filename, linenos in line_data.items():
+            self.lines.setdefault(filename, {}).update(linenos)
+
+    def add_arc_data(self, arc_data):
+        """Add measured arc data.
+
+        `arc_data` is { filename: { (l1,l2): None, ... }, ...}
+
+        """
+        for filename, arcs in arc_data.items():
+            self.arcs.setdefault(filename, {}).update(arcs)
 
    def executed_files(self):
        """A list of all files that had been measured as executed."""
-        return self.lines.keys()
+        return list(self.lines.keys())
 
    def executed_lines(self, filename):
        """A map containing all the line numbers executed in `filename`.
-        
+
        If `filename` hasn't been collected at all (because it wasn't executed)
        then return an empty map.
 
        """
        return self.lines.get(filename) or {}
 
-    def summary(self):
+    def executed_arcs(self, filename):
+        """A map containing all the arcs executed in `filename`."""
+        return self.arcs.get(filename) or {}
+
+    def summary(self, fullpath=False):
        """Return a dict summarizing the coverage data.
-        
-        Keys are the basename of the filenames, and values are the number of
-        executed lines.  This is useful in the unit tests.
-        
+
+        Keys are based on the filenames, and values are the number of executed
+        lines.  If `fullpath` is true, then the keys are the full pathnames of
+        the files, otherwise they are the basenames of the files.
+
        """
        summ = {}
+        if fullpath:
+            filename_fn = lambda f: f
+        else:
+            filename_fn = os.path.basename
        for filename, lines in self.lines.items():
-            summ[os.path.basename(filename)] = len(lines)
+            summ[filename_fn(filename)] = len(lines)
        return summ
+
+    def has_arcs(self):
+        """Does this data have arcs?"""
+        return bool(self.arcs)
+
+
+if __name__ == '__main__':
+    # Ad-hoc: show the raw data in a data file.
+    import pprint, sys
+    covdata = CoverageData()
+    if sys.argv[1:]:
+        fname = sys.argv[1]
+    else:
+        fname = covdata.filename
+    pprint.pprint(covdata.raw_data(fname))
--- a/DebugClients/Python/coverage/doc/CHANGES.txt Thu Jan 07 13:42:05 2010 +0000 +++ b/DebugClients/Python/coverage/doc/CHANGES.txt Thu Jan 07 13:42:51 2010 +0000 @@ -3,44 +3,169 @@ ------------------------------ +Version 3.2, 5 December 2009 +---------------------------- + +- Added a --version option on the command line. + + +Version 3.2b4, 1 December 2009 +------------------------------ + +- Branch coverage improvements: + + - The XML report now includes branch information. + +- Click-to-sort HTML report columns are now persisted in a cookie. Viewing + a report will first sort it the way you last had a coverage report sorted. + Thanks, `Chris Adams`_. + +- On Python 3.x, setuptools has been replaced by `Distribute`_. + +.. _Distribute: http://packages.python.org/distribute/ + + +Version 3.2b3, 23 November 2009 +------------------------------- + +- Fixed a memory leak in the C tracer that was introduced in 3.2b1. + +- Branch coverage improvements: + + - Branches to excluded code are ignored. + +- The table of contents in the HTML report is now sortable: click the headers + on any column. Thanks, `Chris Adams`_. + +.. _Chris Adams: http://improbable.org/chris/ + + +Version 3.2b2, 19 November 2009 +------------------------------- + +- Branch coverage improvements: + + - Classes are no longer incorrectly marked as branches: `issue 32`_. + + - "except" clauses with types are no longer incorrectly marked as branches: + `issue 35`_. + +- Fixed some problems with syntax coloring sources with line continuations and + source with tabs: `issue 30`_ and `issue 31`_. + +- The --omit option now works much better than before, fixing `issue 14`_ and + `issue 33`_. Thanks, Danek Duvall. + +.. _issue 14: http://bitbucket.org/ned/coveragepy/issue/14 +.. _issue 30: http://bitbucket.org/ned/coveragepy/issue/30 +.. _issue 31: http://bitbucket.org/ned/coveragepy/issue/31 +.. _issue 32: http://bitbucket.org/ned/coveragepy/issue/32 +.. _issue 33: http://bitbucket.org/ned/coveragepy/issue/33 +.. _issue 35: http://bitbucket.org/ned/coveragepy/issue/35 + + +Version 3.2b1, 10 November 2009 +------------------------------- + +- Branch coverage! + +- XML reporting has file paths that let Cobertura find the source code. + +- The tracer code has changed; it's a few percent faster. + +- Some exceptions reported by the command line interface have been cleaned up + so that tracebacks inside coverage.py aren't shown. Fixes `issue 23`_. + +.. _issue 23: http://bitbucket.org/ned/coveragepy/issue/23 + + +Version 3.1, 4 October 2009 +--------------------------- + +- Source code can now be read from eggs. Thanks, Ross Lawley. Fixes + `issue 25`_. + +.. _issue 25: http://bitbucket.org/ned/coveragepy/issue/25 + + +Version 3.1b1, 27 September 2009 +-------------------------------- + +- Python 3.1 is now supported. + +- Coverage.py has a new command line syntax with sub-commands. This expands + the possibilities for adding features and options in the future. The old + syntax is still supported. Try "coverage help" to see the new commands. + Thanks to Ben Finney for early help. + +- Added an experimental "coverage xml" command for producing coverage reports + in a Cobertura-compatible XML format. Thanks, Bill Hart. + +- Added the --timid option to enable a simpler, slower trace function that works + for DecoratorTools projects, including TurboGears. Fixed `issue 12`_ and + `issue 13`_. + +- HTML reports show modules from other directories. Fixed `issue 11`_. + +- HTML reports now display syntax-colored Python source. 
+ +- Programs that change directory will still write .coverage files in the + directory where execution started. Fixed `issue 24`_. + +- Added a "coverage debug" command for getting diagnostic information about the + coverage.py installation. + +.. _issue 11: http://bitbucket.org/ned/coveragepy/issue/11 +.. _issue 12: http://bitbucket.org/ned/coveragepy/issue/12 +.. _issue 13: http://bitbucket.org/ned/coveragepy/issue/13 +.. _issue 24: http://bitbucket.org/ned/coveragepy/issue/24 + + Version 3.0.1, 7 July 2009 -------------------------- - Removed the recursion limit in the tracer function. Previously, code that - ran more than 500 frames deep would crash. + ran more than 500 frames deep would crash. Fixed `issue 9`_. - Fixed a bizarre problem involving pyexpat, whereby lines following XML parser - invocations could be overlooked. + invocations could be overlooked. Fixed `issue 10`_. - On Python 2.3, coverage.py could mis-measure code with exceptions being raised. This is now fixed. - The coverage.py code itself will now not be measured by coverage.py, and no - coverage modules will be mentioned in the nose --with-cover plugin. + coverage modules will be mentioned in the nose --with-cover plug-in. Fixed + `issue 8`_. - When running source files, coverage.py now opens them in universal newline mode just like Python does. This lets it run Windows files on Mac, for example. +.. _issue 9: http://bitbucket.org/ned/coveragepy/issue/9 +.. _issue 10: http://bitbucket.org/ned/coveragepy/issue/10 +.. _issue 8: http://bitbucket.org/ned/coveragepy/issue/8 + Version 3.0, 13 June 2009 ------------------------- - Fixed the way the Python library was ignored. Too much code was being excluded the old way. - + - Tabs are now properly converted in HTML reports. Previously indentation was - lost. + lost. Fixed `issue 6`_. - Nested modules now get a proper flat_rootname. Thanks, Christian Heimes. +.. _issue 6: http://bitbucket.org/ned/coveragepy/issue/6 + Version 3.0b3, 16 May 2009 -------------------------- - Added parameters to coverage.__init__ for options that had been set on the coverage object itself. - + - Added clear_exclude() and get_exclude_list() methods for programmatic manipulation of the exclude regexes. @@ -50,7 +175,7 @@ installed after compiling are now located correctly. Thanks, Detlev Offenbach. -- When using the object api (that is, constructing a coverage() object), data +- When using the object API (that is, constructing a coverage() object), data is no longer saved automatically on process exit. You can re-enable it with the auto_data=True parameter on the coverage() constructor. The module-level interface still uses automatic saving. @@ -98,16 +223,16 @@ - Executable lines are identified by reading the line number tables in the compiled code, removing a great deal of complicated analysis code. - + - Precisely which lines are considered executable has changed in some cases. Therefore, your coverage stats may also change slightly. - The singleton coverage object is only created if the module-level functions are used. This maintains the old interface while allowing better programmatic use of Coverage. - + - The minimum supported Python version is 2.3. - + Version 2.85, 14 September 2008 ------------------------------- - Add support for finding source files in eggs. Don't check for morf's being instances of ModuleType, instead use duck typing so that pseudo-modules can participate. Thanks, Imri Goldberg. 
- -- Use os.realpath as part of the fixing of filenames so that symlinks won't + +- Use os.realpath as part of the fixing of file names so that symlinks won't confuse things. Thanks, Patrick Mezard. @@ -130,7 +255,7 @@ ------------------------------- - Don't try to predict whether a file is Python source based on the extension. - Extensionless files are often Pythons scripts. Instead, simply parse the file + Extension-less files are often Python scripts. Instead, simply parse the file and catch the syntax errors. Hat tip to Ben Finney. @@ -170,7 +295,7 @@ - In reports, ignore code executed from strings, since we can't do anything useful with it anyway. - + - Better file handling on Linux, thanks Guillaume Chazarain. - Better shell support on Windows, thanks Noel O'Boyle. @@ -198,13 +323,13 @@ - Call threading.settrace so that all threads are measured. Thanks Martin Fuzzey. - + - Add a file argument to report so that reports can be captured to a different destination. - coverage.py can now measure itself. -- Adapted Greg Rogers' patch for using relative filenames, and sorting and +- Adapted Greg Rogers' patch for using relative file names, and sorting and omitting files to report on. @@ -232,7 +357,7 @@ - Lines can be excluded from consideration, even entire suites of lines. -- The filesystem cache of covered lines can be disabled programmatically. +- The file system cache of covered lines can be disabled programmatically. - Modernized the code.
--- a/DebugClients/Python/coverage/doc/PKG-INFO Thu Jan 07 13:42:05 2010 +0000 +++ b/DebugClients/Python/coverage/doc/PKG-INFO Thu Jan 07 13:42:51 2010 +0000 @@ -1,18 +1,21 @@ Metadata-Version: 1.0 Name: coverage -Version: 3.0.1 +Version: 3.2 Summary: Code coverage measurement for Python Home-page: http://nedbatchelder.com/code/coverage Author: Ned Batchelder Author-email: ned@nedbatchelder.com License: BSD -Description: Coverage measures code coverage, typically during test execution. It uses the - code analysis tools and tracing hooks provided in the Python standard library - to determine which lines are executable, and which have been executed. +Description: Coverage.py measures code coverage, typically during test execution. It uses + the code analysis tools and tracing hooks provided in the Python standard + library to determine which lines are executable, and which have been executed. - Code repository and issue tracker are at - `bitbucket.org <http://bitbucket.org/ned/coveragepy>`_. + Coverage.py runs on Pythons 2.3 through 3.1. + Documentation is at `nedbatchelder.com <http://nedbatchelder.com/code/coverage>`_. Code repository and issue + tracker are at `bitbucket.org <http://bitbucket.org/ned/coveragepy>`_. + + New in 3.2: Branch coverage! Keywords: code coverage testing Platform: UNKNOWN @@ -20,7 +23,8 @@ Classifier: Intended Audience :: Developers Classifier: License :: OSI Approved :: BSD License Classifier: Operating System :: OS Independent -Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 3 Classifier: Topic :: Software Development :: Quality Assurance Classifier: Topic :: Software Development :: Testing Classifier: Development Status :: 5 - Production/Stable
--- a/DebugClients/Python/coverage/doc/README.txt Thu Jan 07 13:42:05 2010 +0000 +++ b/DebugClients/Python/coverage/doc/README.txt Thu Jan 07 13:42:51 2010 +0000 @@ -1,8 +1,8 @@ -Coverage: code coverage testing for Python +Coverage.py: code coverage testing for Python -Coverage measures code coverage, typically during test execution. It uses the -code analysis tools and tracing hooks provided in the Python standard library -to determine which lines are executable, and which have been executed. +Coverage.py measures code coverage, typically during test execution. It uses +the code analysis tools and tracing hooks provided in the Python standard +library to determine which lines are executable, and which have been executed. For more information, see http://nedbatchelder.com/code/coverage
--- a/DebugClients/Python/coverage/execfile.py Thu Jan 07 13:42:05 2010 +0000 +++ b/DebugClients/Python/coverage/execfile.py Thu Jan 07 13:42:51 2010 +0000 @@ -2,20 +2,32 @@ import imp, os, sys +from coverage.backward import exec_function +from coverage.misc import NoSource + + +try: + # In Py 2.x, the builtins were in __builtin__ + BUILTINS = sys.modules['__builtin__'] +except KeyError: + # In Py 3.x, they're in builtins + BUILTINS = sys.modules['builtins'] + + def run_python_file(filename, args): """Run a python file as if it were the main program on the command line. - + `filename` is the path to the file to execute, it need not be a .py file. `args` is the argument array to present as sys.argv, including the first element representing the file being executed. - + """ # Create a module to serve as __main__ old_main_mod = sys.modules['__main__'] main_mod = imp.new_module('__main__') sys.modules['__main__'] = main_mod main_mod.__file__ = filename - main_mod.__builtins__ = sys.modules['__builtin__'] + main_mod.__builtins__ = BUILTINS # Set sys.argv and the first path element properly. old_argv = sys.argv @@ -24,12 +36,15 @@ sys.path[0] = os.path.dirname(filename) try: - source = open(filename, 'rU').read() - exec compile(source, filename, "exec") in main_mod.__dict__ + try: + source = open(filename, 'rU').read() + except IOError: + raise NoSource("No file to run: %r" % filename) + exec_function(source, filename, main_mod.__dict__) finally: # Restore the old __main__ sys.modules['__main__'] = old_main_mod - + # Restore the old argv and path sys.argv = old_argv sys.path[0] = old_path0
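With this change, a missing script surfaces from run_python_file as the new NoSource exception rather than a bare IOError, and the builtins lookup works on both 2.x and 3.x. A hedged usage sketch (the script name and arguments are invented):

    from coverage.execfile import run_python_file
    from coverage.misc import NoSource

    try:
        # As with sys.argv, args[0] is the script being run.
        run_python_file("prog.py", ["prog.py", "--verbose"])
    except NoSource:
        print("prog.py does not exist")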
--- a/DebugClients/Python/coverage/files.py Thu Jan 07 13:42:05 2010 +0000 +++ b/DebugClients/Python/coverage/files.py Thu Jan 07 13:42:51 2010 +0000 @@ -2,7 +2,7 @@ import os, sys -class FileLocator: +class FileLocator(object): """Understand how filenames work.""" def __init__(self): @@ -18,23 +18,23 @@ def relative_filename(self, filename): """Return the relative form of `filename`. - + The filename will be relative to the current directory when the FileLocator was constructed. - + """ return filename.replace(self.relative_dir, "") def canonical_filename(self, filename): """Return a canonical filename for `filename`. - + An absolute path with no redundant components and normalized case. - + """ - if not self.canonical_filename_cache.has_key(filename): + if filename not in self.canonical_filename_cache: f = filename if os.path.isabs(f) and not os.path.exists(f): - if not self.get_zip_data(f): + if self.get_zip_data(f) is None: f = os.path.basename(f) if not os.path.isabs(f): for path in [os.curdir] + sys.path: @@ -48,10 +48,11 @@ def get_zip_data(self, filename): """Get data from `filename` if it is a zip file path. - - Returns the data read from the zip file, or None if no zip file could - be found or `filename` isn't in it. - + + Returns the string data read from the zip file, or None if no zip file + could be found or `filename` isn't in it. The data returned will be + an empty string if the file is empty. + """ import zipimport markers = ['.zip'+os.sep, '.egg'+os.sep] @@ -66,5 +67,7 @@ data = zi.get_data(parts[1]) except IOError: continue + if sys.hexversion > 0x03000000: + data = data.decode('utf8') # TODO: How to do this properly? return data return None
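FileLocator's two transformations are cache-backed and zip-aware after this change: relative_filename() strips the directory that was current when the locator was built, and canonical_filename() resolves a bare name against os.curdir and sys.path before normalizing it. A small sketch (the paths are invented):

    import os
    from coverage.files import FileLocator

    locator = FileLocator()
    # A path under the construction-time cwd comes back relative to it.
    print(locator.relative_filename(os.path.join(os.getcwd(), "pkg", "mod.py")))
    # A bare name is searched for on os.curdir and sys.path, then normalized.
    print(locator.canonical_filename("mod.py"))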
--- a/DebugClients/Python/coverage/html.py Thu Jan 07 13:42:05 2010 +0000 +++ b/DebugClients/Python/coverage/html.py Thu Jan 07 13:42:51 2010 +0000 @@ -1,9 +1,11 @@ """HTML reporting for Coverage.""" import os, re, shutil -from . import __version__ # pylint: disable-msg=W0611 -from report import Reporter -from templite import Templite + +from coverage import __url__, __version__ # pylint: disable-msg=W0611 +from coverage.phystokens import source_token_lines +from coverage.report import Reporter +from coverage.templite import Templite # Disable pylint msg W0612, because a bunch of variables look unused, but # they're accessed in a templite context via locals(). @@ -16,28 +18,29 @@ def data(fname): """Return the contents of a data file of ours.""" return open(data_filename(fname)).read() - + class HtmlReporter(Reporter): """HTML reporting.""" - + def __init__(self, coverage, ignore_errors=False): super(HtmlReporter, self).__init__(coverage, ignore_errors) self.directory = None self.source_tmpl = Templite(data("htmlfiles/pyfile.html"), globals()) - + self.files = [] + self.arcs = coverage.data.has_arcs() def report(self, morfs, directory, omit_prefixes=None): """Generate an HTML report for `morfs`. - + `morfs` is a list of modules or filenames. `directory` is where to put the HTML files. `omit_prefixes` is a list of strings, prefixes of modules to omit from the report. - + """ assert directory, "must provide a directory for html reporting" - + # Process all the files. self.report_files(self.html_file, morfs, directory, omit_prefixes) @@ -45,57 +48,79 @@ self.index_file() # Create the once-per-directory files. - shutil.copyfile( - data_filename("htmlfiles/style.css"), - os.path.join(directory, "style.css") - ) - shutil.copyfile( - data_filename("htmlfiles/jquery-1.3.2.min.js"), - os.path.join(directory, "jquery-1.3.2.min.js") - ) + for static in [ + "style.css", "coverage_html.js", + "jquery-1.3.2.min.js", "jquery.tablesorter.min.js" + ]: + shutil.copyfile( + data_filename("htmlfiles/" + static), + os.path.join(directory, static) + ) - def html_file(self, cu, statements, excluded, missing): + def html_file(self, cu, analysis): """Generate an HTML file for one source file.""" - - source = cu.source_file() - source_lines = source.readlines() - - n_lin = len(source_lines) - n_stm = len(statements) - n_exc = len(excluded) - n_mis = len(missing) - n_run = n_stm - n_mis - if n_stm > 0: - pc_cov = 100.0 * n_run / n_stm - else: - pc_cov = 100.0 + + source = cu.source_file().read() + + nums = analysis.numbers + + missing_branch_arcs = analysis.missing_branch_arcs() + n_par = 0 # accumulated below. + arcs = self.arcs # These classes determine which lines are highlighted by default. - c_run = " run hide" + c_run = " run hide_run" c_exc = " exc" c_mis = " mis" - + c_par = " par" + c_run + lines = [] - for lineno, line in enumerate(source_lines): - lineno += 1 # enum is 0-based, lines are 1-based. - - - css_class = "" - if lineno in statements: - css_class += " stm" - if lineno not in missing and lineno not in excluded: - css_class += c_run - if lineno in excluded: - css_class += c_exc - if lineno in missing: - css_class += c_mis - - lineinfo = { - 'text': line, + + for lineno, line in enumerate(source_token_lines(source)): + lineno += 1 # 1-based line numbers. + # Figure out how to mark this line. 
+ line_class = "" + annotate_html = "" + annotate_title = "" + if lineno in analysis.statements: + line_class += " stm" + if lineno in analysis.excluded: + line_class += c_exc + elif lineno in analysis.missing: + line_class += c_mis + elif self.arcs and lineno in missing_branch_arcs: + line_class += c_par + n_par += 1 + annlines = [] + for b in missing_branch_arcs[lineno]: + if b == -1: + annlines.append("exit") + else: + annlines.append(str(b)) + annotate_html = " ".join(annlines) + if len(annlines) > 1: + annotate_title = "no jumps to these line numbers" + elif len(annlines) == 1: + annotate_title = "no jump to this line number" + elif lineno in analysis.statements: + line_class += c_run + + # Build the HTML for the line + html = "" + for tok_type, tok_text in line: + if tok_type == "ws": + html += escape(tok_text) + else: + tok_html = escape(tok_text) or ' ' + html += "<span class='%s'>%s</span>" % (tok_type, tok_html) + + lines.append({ + 'html': html, 'number': lineno, - 'class': css_class.strip() or "pln" - } - lines.append(lineinfo) + 'class': line_class.strip() or "pln", + 'annotate': annotate_html, + 'annotate_title': annotate_title, + }) # Write the HTML page for this file. html_filename = cu.flat_rootname() + ".html" @@ -107,11 +132,8 @@ # Save this file's information for the index file. self.files.append({ - 'stm': n_stm, - 'run': n_run, - 'exc': n_exc, - 'mis': n_mis, - 'pc_cov': pc_cov, + 'nums': nums, + 'par': n_par, 'html_filename': html_filename, 'cu': cu, }) @@ -121,51 +143,40 @@ index_tmpl = Templite(data("htmlfiles/index.html"), globals()) files = self.files - - total_stm = sum([f['stm'] for f in files]) - total_run = sum([f['run'] for f in files]) - total_exc = sum([f['exc'] for f in files]) - if total_stm: - total_cov = 100.0 * total_run / total_stm - else: - total_cov = 100.0 + arcs = self.arcs + + totals = sum([f['nums'] for f in files]) fhtml = open(os.path.join(self.directory, "index.html"), "w") fhtml.write(index_tmpl.render(locals())) fhtml.close() -# Helpers for templates +# Helpers for templates and generating HTML def escape(t): """HTML-escape the text in t.""" return (t - # Change all tabs to 4 spaces. - .expandtabs(4) # Convert HTML special chars into HTML entities. .replace("&", "&").replace("<", "<").replace(">", ">") .replace("'", "'").replace('"', """) - # Convert runs of spaces: " " -> " " + # Convert runs of spaces: "......" -> " . . ." .replace(" ", " ") # To deal with odd-length runs, convert the final pair of spaces - # so that " " -> " " + # so that "....." -> " . ." .replace(" ", " ") ) -def not_empty(t): - """Make sure HTML content is not completely empty.""" - return t or " " - def format_pct(p): """Format a percentage value for the HTML reports.""" return "%.0f" % p def spaceless(html): """Squeeze out some annoying extra space from an HTML string. - + Nicely-formatted templates mean lots of extra space in the result. Get rid of some. - + """ html = re.sub(">\s+<p ", ">\n<p ", html) - return html \ No newline at end of file + return html
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/DebugClients/Python/coverage/htmlfiles/coverage_html.js Thu Jan 07 13:42:51 2010 +0000 @@ -0,0 +1,64 @@ +// Coverage.py HTML report browser code. + +// Loaded on index.html +function index_page_ready($) { + // Look for a cookie containing previous sort settings: + sort_list = []; + cookie_name = "COVERAGE_INDEX_SORT"; + + // This almost makes it worth installing the jQuery cookie plugin: + if (document.cookie.indexOf(cookie_name) > -1) { + cookies = document.cookie.split(";"); + for (var i=0; i < cookies.length; i++) { + parts = cookies[i].split("=") + + if ($.trim(parts[0]) == cookie_name && parts[1]) { + sort_list = eval("[[" + parts[1] + "]]"); + break; + } + } + } + + // Create a new widget which exists only to save and restore + // the sort order: + $.tablesorter.addWidget({ + id: "persistentSort", + + // Format is called by the widget before displaying: + format: function(table) { + if (table.config.sortList.length == 0 && sort_list.length > 0) { + // This table hasn't been sorted before - we'll use + // our stored settings: + jQuery(table).trigger('sorton', [sort_list]); + } + else { + // This is not the first load - something has + // already defined sorting so we'll just update + // our stored value to match: + sort_list = table.config.sortList; + } + } + }); + + // Configure our tablesorter to handle the variable number of + // columns produced depending on report options: + var headers = {}; + var col_count = jQuery("table.index > thead > tr > th").length; + + headers[0] = { sorter: 'text' }; + for (var i = 1; i < col_count-1; i++) { + headers[i] = { sorter: 'digit' }; + } + headers[col_count-1] = { sorter: 'percent' }; + + // Enable the table sorter: + $("table.index").tablesorter({ + widgets: ['persistentSort'], + headers: headers + }); + + // Watch for page unload events so we can save the final sort settings: + $(window).unload(function() { + document.cookie = cookie_name + "=" + sort_list.toString() + "; path=/" + }); +}
--- a/DebugClients/Python/coverage/htmlfiles/index.html Thu Jan 07 13:42:05 2010 +0000 +++ b/DebugClients/Python/coverage/htmlfiles/index.html Thu Jan 07 13:42:51 2010 +0000 @@ -1,54 +1,80 @@ -<!doctype html PUBLIC "-//W3C//DTD html 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd"> +<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd"> <html> -<head> -<title>Coverage report</title> -<link rel='stylesheet' href='style.css' type='text/css'> -</head> -<body> + <head> + <meta http-equiv='Content-Type' content='text/html; charset=utf-8'> + <title>Coverage report</title> + <link rel='stylesheet' href='style.css' type='text/css'> + <script type='text/javascript' src='jquery-1.3.2.min.js'></script> + <script type='text/javascript' src='jquery.tablesorter.min.js'></script> + <script type='text/javascript' src='coverage_html.js'></script> + <script type="text/javascript" charset="utf-8"> + jQuery(document).ready(index_page_ready); + </script> + </head> + <body> -<div id='header'> - <div class='content'> - <h1>Coverage report: - <span class='pc_cov'>{{total_cov|format_pct}}%</span> - </h1> - </div> -</div> + <div id='header'> + <div class='content'> + <h1>Coverage report: + <span class='pc_cov'>{{totals.pc_covered|format_pct}}%</span> + </h1> + </div> + </div> -<div id='index'> -<table class='index'> -<tr> - <th class='name'>Module</th> - <th>statements</th> - <th>run</th> - <th>excluded</th> - <th>coverage</th> -</tr> -{% for file in files %} -<tr> - <td class='name'><a href='{{file.html_filename}}'>{{file.cu.name}}</a></td> - <td>{{file.stm}}</td> - <td>{{file.run}}</td> - <td>{{file.exc}}</td> - <td>{{file.pc_cov|format_pct}}%</td> -</tr> -{% endfor %} -<tr class='total'> -<td class='name'>Total</td> -<td>{{total_stm}}</td> -<td>{{total_run}}</td> -<td>{{total_exc}}</td> -<td>{{total_cov|format_pct}}%</td> -</tr> -</table> -</div> + <div id='index'> + <table class='index'> + <thead> + {# The title='' attr doesn't work in Safari. #} + <tr class='tablehead' title='Click to sort'> + <th class='name left headerSortDown'>Module</th> + <th>statements</th> + <th>run</th> + <th>excluded</th> + {% if arcs %} + <th>branches</th> + <th>br exec</th> + {% endif %} + <th class='right'>coverage</th> + </tr> + </thead> + {# HTML syntax requires thead, tfoot, tbody #} + <tfoot> + <tr class='total'> + <td class='name left'>Total</td> + <td>{{totals.n_statements}}</td> + <td>{{totals.n_executed}}</td> + <td>{{totals.n_excluded}}</td> + {% if arcs %} + <td>{{totals.n_branches}}</td> + <td>{{totals.n_executed_branches}}</td> + {% endif %} + <td class='right'>{{totals.pc_covered|format_pct}}%</td> + </tr> + </tfoot> + <tbody> + {% for file in files %} + <tr class='file'> + <td class='name left'><a href='{{file.html_filename}}'>{{file.cu.name}}</a></td> + <td>{{file.nums.n_statements}}</td> + <td>{{file.nums.n_executed}}</td> + <td>{{file.nums.n_excluded}}</td> + {% if arcs %} + <td>{{file.nums.n_branches}}</td> + <td>{{file.nums.n_executed_branches}}</td> + {% endif %} + <td class='right'>{{file.nums.pc_covered|format_pct}}%</td> + </tr> + {% endfor %} + </tbody> + </table> + </div> -<div id='footer'> - <div class='content'> - <p> - <a class='nav' href='http://nedbatchelder.com/code/coverage'>coverage v{{__version__}}</a> - </p> - </div> -</div> - -</body> + <div id='footer'> + <div class='content'> + <p> + <a class='nav' href='{{__url__}}'>coverage.py v{{__version__}}</a> + </p> + </div> + </div> + </body> </html>
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/DebugClients/Python/coverage/htmlfiles/jquery.tablesorter.min.js Thu Jan 07 13:42:51 2010 +0000 @@ -0,0 +1,2 @@ + +(function($){$.extend({tablesorter:new function(){var parsers=[],widgets=[];this.defaults={cssHeader:"header",cssAsc:"headerSortUp",cssDesc:"headerSortDown",sortInitialOrder:"asc",sortMultiSortKey:"shiftKey",sortForce:null,sortAppend:null,textExtraction:"simple",parsers:{},widgets:[],widgetZebra:{css:["even","odd"]},headers:{},widthFixed:false,cancelSelection:true,sortList:[],headerList:[],dateFormat:"us",decimal:'.',debug:false};function benchmark(s,d){log(s+","+(new Date().getTime()-d.getTime())+"ms");}this.benchmark=benchmark;function log(s){if(typeof console!="undefined"&&typeof console.debug!="undefined"){console.log(s);}else{alert(s);}}function buildParserCache(table,$headers){if(table.config.debug){var parsersDebug="";}var rows=table.tBodies[0].rows;if(table.tBodies[0].rows[0]){var list=[],cells=rows[0].cells,l=cells.length;for(var i=0;i<l;i++){var p=false;if($.metadata&&($($headers[i]).metadata()&&$($headers[i]).metadata().sorter)){p=getParserById($($headers[i]).metadata().sorter);}else if((table.config.headers[i]&&table.config.headers[i].sorter)){p=getParserById(table.config.headers[i].sorter);}if(!p){p=detectParserForColumn(table,cells[i]);}if(table.config.debug){parsersDebug+="column:"+i+" parser:"+p.id+"\n";}list.push(p);}}if(table.config.debug){log(parsersDebug);}return list;};function detectParserForColumn(table,node){var l=parsers.length;for(var i=1;i<l;i++){if(parsers[i].is($.trim(getElementText(table.config,node)),table,node)){return parsers[i];}}return parsers[0];}function getParserById(name){var l=parsers.length;for(var i=0;i<l;i++){if(parsers[i].id.toLowerCase()==name.toLowerCase()){return parsers[i];}}return false;}function buildCache(table){if(table.config.debug){var cacheTime=new Date();}var totalRows=(table.tBodies[0]&&table.tBodies[0].rows.length)||0,totalCells=(table.tBodies[0].rows[0]&&table.tBodies[0].rows[0].cells.length)||0,parsers=table.config.parsers,cache={row:[],normalized:[]};for(var i=0;i<totalRows;++i){var c=table.tBodies[0].rows[i],cols=[];cache.row.push($(c));for(var j=0;j<totalCells;++j){cols.push(parsers[j].format(getElementText(table.config,c.cells[j]),table,c.cells[j]));}cols.push(i);cache.normalized.push(cols);cols=null;};if(table.config.debug){benchmark("Building cache for "+totalRows+" rows:",cacheTime);}return cache;};function getElementText(config,node){if(!node)return"";var t="";if(config.textExtraction=="simple"){if(node.childNodes[0]&&node.childNodes[0].hasChildNodes()){t=node.childNodes[0].innerHTML;}else{t=node.innerHTML;}}else{if(typeof(config.textExtraction)=="function"){t=config.textExtraction(node);}else{t=$(node).text();}}return t;}function appendToTable(table,cache){if(table.config.debug){var appendTime=new Date()}var c=cache,r=c.row,n=c.normalized,totalRows=n.length,checkCell=(n[0].length-1),tableBody=$(table.tBodies[0]),rows=[];for(var i=0;i<totalRows;i++){rows.push(r[n[i][checkCell]]);if(!table.config.appender){var o=r[n[i][checkCell]];var l=o.length;for(var j=0;j<l;j++){tableBody[0].appendChild(o[j]);}}}if(table.config.appender){table.config.appender(table,rows);}rows=null;if(table.config.debug){benchmark("Rebuilt table:",appendTime);}applyWidget(table);setTimeout(function(){$(table).trigger("sortEnd");},0);};function buildHeaders(table){if(table.config.debug){var time=new Date();}var meta=($.metadata)?true:false,tableHeadersRows=[];for(var 
i=0;i<table.tHead.rows.length;i++){tableHeadersRows[i]=0;};$tableHeaders=$("thead th",table);$tableHeaders.each(function(index){this.count=0;this.column=index;this.order=formatSortingOrder(table.config.sortInitialOrder);if(checkHeaderMetadata(this)||checkHeaderOptions(table,index))this.sortDisabled=true;if(!this.sortDisabled){$(this).addClass(table.config.cssHeader);}table.config.headerList[index]=this;});if(table.config.debug){benchmark("Built headers:",time);log($tableHeaders);}return $tableHeaders;};function checkCellColSpan(table,rows,row){var arr=[],r=table.tHead.rows,c=r[row].cells;for(var i=0;i<c.length;i++){var cell=c[i];if(cell.colSpan>1){arr=arr.concat(checkCellColSpan(table,headerArr,row++));}else{if(table.tHead.length==1||(cell.rowSpan>1||!r[row+1])){arr.push(cell);}}}return arr;};function checkHeaderMetadata(cell){if(($.metadata)&&($(cell).metadata().sorter===false)){return true;};return false;}function checkHeaderOptions(table,i){if((table.config.headers[i])&&(table.config.headers[i].sorter===false)){return true;};return false;}function applyWidget(table){var c=table.config.widgets;var l=c.length;for(var i=0;i<l;i++){getWidgetById(c[i]).format(table);}}function getWidgetById(name){var l=widgets.length;for(var i=0;i<l;i++){if(widgets[i].id.toLowerCase()==name.toLowerCase()){return widgets[i];}}};function formatSortingOrder(v){if(typeof(v)!="Number"){i=(v.toLowerCase()=="desc")?1:0;}else{i=(v==(0||1))?v:0;}return i;}function isValueInArray(v,a){var l=a.length;for(var i=0;i<l;i++){if(a[i][0]==v){return true;}}return false;}function setHeadersCss(table,$headers,list,css){$headers.removeClass(css[0]).removeClass(css[1]);var h=[];$headers.each(function(offset){if(!this.sortDisabled){h[this.column]=$(this);}});var l=list.length;for(var i=0;i<l;i++){h[list[i][0]].addClass(css[list[i][1]]);}}function fixColumnWidth(table,$headers){var c=table.config;if(c.widthFixed){var colgroup=$('<colgroup>');$("tr:first td",table.tBodies[0]).each(function(){colgroup.append($('<col>').css('width',$(this).width()));});$(table).prepend(colgroup);};}function updateHeaderSortCount(table,sortList){var c=table.config,l=sortList.length;for(var i=0;i<l;i++){var s=sortList[i],o=c.headerList[s[0]];o.count=s[1];o.count++;}}function multisort(table,sortList,cache){if(table.config.debug){var sortTime=new Date();}var dynamicExp="var sortWrapper = function(a,b) {",l=sortList.length;for(var i=0;i<l;i++){var c=sortList[i][0];var order=sortList[i][1];var s=(getCachedSortType(table.config.parsers,c)=="text")?((order==0)?"sortText":"sortTextDesc"):((order==0)?"sortNumeric":"sortNumericDesc");var e="e"+i;dynamicExp+="var "+e+" = "+s+"(a["+c+"],b["+c+"]); ";dynamicExp+="if("+e+") { return "+e+"; } ";dynamicExp+="else { ";}var orgOrderCol=cache.normalized[0].length-1;dynamicExp+="return a["+orgOrderCol+"]-b["+orgOrderCol+"];";for(var i=0;i<l;i++){dynamicExp+="}; ";}dynamicExp+="return 0; ";dynamicExp+="}; ";eval(dynamicExp);cache.normalized.sort(sortWrapper);if(table.config.debug){benchmark("Sorting on "+sortList.toString()+" and dir "+order+" time:",sortTime);}return cache;};function sortText(a,b){return((a<b)?-1:((a>b)?1:0));};function sortTextDesc(a,b){return((b<a)?-1:((b>a)?1:0));};function sortNumeric(a,b){return a-b;};function sortNumericDesc(a,b){return b-a;};function getCachedSortType(parsers,i){return parsers[i].type;};this.construct=function(settings){return this.each(function(){if(!this.tHead||!this.tBodies)return;var 
$this,$document,$headers,cache,config,shiftDown=0,sortOrder;this.config={};config=$.extend(this.config,$.tablesorter.defaults,settings);$this=$(this);$headers=buildHeaders(this);this.config.parsers=buildParserCache(this,$headers);cache=buildCache(this);var sortCSS=[config.cssDesc,config.cssAsc];fixColumnWidth(this);$headers.click(function(e){$this.trigger("sortStart");var totalRows=($this[0].tBodies[0]&&$this[0].tBodies[0].rows.length)||0;if(!this.sortDisabled&&totalRows>0){var $cell=$(this);var i=this.column;this.order=this.count++%2;if(!e[config.sortMultiSortKey]){config.sortList=[];if(config.sortForce!=null){var a=config.sortForce;for(var j=0;j<a.length;j++){if(a[j][0]!=i){config.sortList.push(a[j]);}}}config.sortList.push([i,this.order]);}else{if(isValueInArray(i,config.sortList)){for(var j=0;j<config.sortList.length;j++){var s=config.sortList[j],o=config.headerList[s[0]];if(s[0]==i){o.count=s[1];o.count++;s[1]=o.count%2;}}}else{config.sortList.push([i,this.order]);}};setTimeout(function(){setHeadersCss($this[0],$headers,config.sortList,sortCSS);appendToTable($this[0],multisort($this[0],config.sortList,cache));},1);return false;}}).mousedown(function(){if(config.cancelSelection){this.onselectstart=function(){return false};return false;}});$this.bind("update",function(){this.config.parsers=buildParserCache(this,$headers);cache=buildCache(this);}).bind("sorton",function(e,list){$(this).trigger("sortStart");config.sortList=list;var sortList=config.sortList;updateHeaderSortCount(this,sortList);setHeadersCss(this,$headers,sortList,sortCSS);appendToTable(this,multisort(this,sortList,cache));}).bind("appendCache",function(){appendToTable(this,cache);}).bind("applyWidgetId",function(e,id){getWidgetById(id).format(this);}).bind("applyWidgets",function(){applyWidget(this);});if($.metadata&&($(this).metadata()&&$(this).metadata().sortlist)){config.sortList=$(this).metadata().sortlist;}if(config.sortList.length>0){$this.trigger("sorton",[config.sortList]);}applyWidget(this);});};this.addParser=function(parser){var l=parsers.length,a=true;for(var i=0;i<l;i++){if(parsers[i].id.toLowerCase()==parser.id.toLowerCase()){a=false;}}if(a){parsers.push(parser);};};this.addWidget=function(widget){widgets.push(widget);};this.formatFloat=function(s){var i=parseFloat(s);return(isNaN(i))?0:i;};this.formatInt=function(s){var i=parseInt(s);return(isNaN(i))?0:i;};this.isDigit=function(s,config){var DECIMAL='\\'+config.decimal;var exp='/(^[+]?0('+DECIMAL+'0+)?$)|(^([-+]?[1-9][0-9]*)$)|(^([-+]?((0?|[1-9][0-9]*)'+DECIMAL+'(0*[1-9][0-9]*)))$)|(^[-+]?[1-9]+[0-9]*'+DECIMAL+'0+$)/';return RegExp(exp).test($.trim(s));};this.clearTableBody=function(table){if($.browser.msie){function empty(){while(this.firstChild)this.removeChild(this.firstChild);}empty.apply(table.tBodies[0]);}else{table.tBodies[0].innerHTML="";}};}});$.fn.extend({tablesorter:$.tablesorter.construct});var ts=$.tablesorter;ts.addParser({id:"text",is:function(s){return true;},format:function(s){return $.trim(s.toLowerCase());},type:"text"});ts.addParser({id:"digit",is:function(s,table){var c=table.config;return $.tablesorter.isDigit(s,c);},format:function(s){return $.tablesorter.formatFloat(s);},type:"numeric"});ts.addParser({id:"currency",is:function(s){return/^[£$€?.]/.test(s);},format:function(s){return $.tablesorter.formatFloat(s.replace(new RegExp(/[^0-9.]/g),""));},type:"numeric"});ts.addParser({id:"ipAddress",is:function(s){return/^\d{2,3}[\.]\d{2,3}[\.]\d{2,3}[\.]\d{2,3}$/.test(s);},format:function(s){var a=s.split("."),r="",l=a.length;for(var 
i=0;i<l;i++){var item=a[i];if(item.length==2){r+="0"+item;}else{r+=item;}}return $.tablesorter.formatFloat(r);},type:"numeric"});ts.addParser({id:"url",is:function(s){return/^(https?|ftp|file):\/\/$/.test(s);},format:function(s){return jQuery.trim(s.replace(new RegExp(/(https?|ftp|file):\/\//),''));},type:"text"});ts.addParser({id:"isoDate",is:function(s){return/^\d{4}[\/-]\d{1,2}[\/-]\d{1,2}$/.test(s);},format:function(s){return $.tablesorter.formatFloat((s!="")?new Date(s.replace(new RegExp(/-/g),"/")).getTime():"0");},type:"numeric"});ts.addParser({id:"percent",is:function(s){return/\%$/.test($.trim(s));},format:function(s){return $.tablesorter.formatFloat(s.replace(new RegExp(/%/g),""));},type:"numeric"});ts.addParser({id:"usLongDate",is:function(s){return s.match(new RegExp(/^[A-Za-z]{3,10}\.? [0-9]{1,2}, ([0-9]{4}|'?[0-9]{2}) (([0-2]?[0-9]:[0-5][0-9])|([0-1]?[0-9]:[0-5][0-9]\s(AM|PM)))$/));},format:function(s){return $.tablesorter.formatFloat(new Date(s).getTime());},type:"numeric"});ts.addParser({id:"shortDate",is:function(s){return/\d{1,2}[\/\-]\d{1,2}[\/\-]\d{2,4}/.test(s);},format:function(s,table){var c=table.config;s=s.replace(/\-/g,"/");if(c.dateFormat=="us"){s=s.replace(/(\d{1,2})[\/\-](\d{1,2})[\/\-](\d{4})/,"$3/$1/$2");}else if(c.dateFormat=="uk"){s=s.replace(/(\d{1,2})[\/\-](\d{1,2})[\/\-](\d{4})/,"$3/$2/$1");}else if(c.dateFormat=="dd/mm/yy"||c.dateFormat=="dd-mm-yy"){s=s.replace(/(\d{1,2})[\/\-](\d{1,2})[\/\-](\d{2})/,"$1/$2/$3");}return $.tablesorter.formatFloat(new Date(s).getTime());},type:"numeric"});ts.addParser({id:"time",is:function(s){return/^(([0-2]?[0-9]:[0-5][0-9])|([0-1]?[0-9]:[0-5][0-9]\s(am|pm)))$/.test(s);},format:function(s){return $.tablesorter.formatFloat(new Date("2000/01/01 "+s).getTime());},type:"numeric"});ts.addParser({id:"metadata",is:function(s){return false;},format:function(s,table,cell){var c=table.config,p=(!c.parserMetadataName)?'sortValue':c.parserMetadataName;return $(cell).metadata()[p];},type:"numeric"});ts.addWidget({id:"zebra",format:function(table){if(table.config.debug){var time=new Date();}$("tr:visible",table.tBodies[0]).filter(':even').removeClass(table.config.widgetZebra.css[1]).addClass(table.config.widgetZebra.css[0]).end().filter(':odd').removeClass(table.config.widgetZebra.css[0]).addClass(table.config.widgetZebra.css[1]);if(table.config.debug){$.tablesorter.benchmark("Applying Zebra widget",time);}}});})(jQuery); \ No newline at end of file
--- a/DebugClients/Python/coverage/htmlfiles/pyfile.html Thu Jan 07 13:42:05 2010 +0000 +++ b/DebugClients/Python/coverage/htmlfiles/pyfile.html Thu Jan 07 13:42:51 2010 +0000 @@ -1,19 +1,21 @@ -<!doctype html PUBLIC "-//W3C//DTD html 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd"> +<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd"> <html> <head> +<meta http-equiv='Content-Type' content='text/html; charset=utf-8'> <title>Coverage for {{cu.name|escape}}</title> <link rel='stylesheet' href='style.css' type='text/css'> -<script src='jquery-1.3.2.min.js'></script> -<script> +<script type='text/javascript' src='jquery-1.3.2.min.js'></script> +<script type='text/javascript'> function toggle_lines(btn, cls) { var btn = $(btn); - if (btn.hasClass("hide")) { - $("#source ."+cls).removeClass("hide"); - btn.removeClass("hide"); + var hide = "hide_"+cls; + if (btn.hasClass(hide)) { + $("#source ."+cls).removeClass(hide); + btn.removeClass(hide); } else { - $("#source ."+cls).addClass("hide"); - btn.addClass("hide"); + $("#source ."+cls).addClass(hide); + btn.addClass(hide); } } </script> @@ -22,13 +24,16 @@ <div id='header'> <div class='content'> <h1>Coverage for <b>{{cu.name|escape}}</b> : - <span class='pc_cov'>{{pc_cov|format_pct}}%</span> + <span class='pc_cov'>{{nums.pc_covered|format_pct}}%</span> </h1> <h2 class='stats'> - {{n_stm}} statements - <span class='{{c_run.strip}}' onclick='toggle_lines(this, "run")'>{{n_run}} run</span> - <span class='{{c_exc.strip}}' onclick='toggle_lines(this, "exc")'>{{n_exc}} excluded</span> - <span class='{{c_mis.strip}}' onclick='toggle_lines(this, "mis")'>{{n_mis}} missing</span> + {{nums.n_statements}} statements + <span class='{{c_run.strip}}' onclick='toggle_lines(this, "run")'>{{nums.n_executed}} run</span> + <span class='{{c_exc.strip}}' onclick='toggle_lines(this, "exc")'>{{nums.n_excluded}} excluded</span> + <span class='{{c_mis.strip}}' onclick='toggle_lines(this, "mis")'>{{nums.n_missing}} missing</span> + {% if arcs %} + <span class='{{c_par.strip}}' onclick='toggle_lines(this, "par")'>{{n_par}} partial</span> + {% endif %} </h2> </div> </div> @@ -43,7 +48,7 @@ </td> <td class='text' valign='top'> {% for line in lines %} - <p class='{{line.class}}'>{{line.text.rstrip|escape|not_empty}}</p> + <p class='{{line.class}}'>{% if line.annotate %}<span class='annotate' title='{{line.annotate_title}}'>{{line.annotate}}</span>{% endif %}{{line.html}}<span class='strut'>&nbsp;</span></p> {% endfor %} </td> </tr>
--- a/DebugClients/Python/coverage/htmlfiles/style.css Thu Jan 07 13:42:05 2010 +0000 +++ b/DebugClients/Python/coverage/htmlfiles/style.css Thu Jan 07 13:42:51 2010 +0000 @@ -2,14 +2,14 @@ /* Page-wide styles */ html, body, h1, h2, h3, p, td, th { margin: 0; - padding: 0; - border: 0; - outline: 0; - font-weight: inherit; - font-style: inherit; - font-size: 100%; - font-family: inherit; - vertical-align: baseline; + padding: 0; + border: 0; + outline: 0; + font-weight: inherit; + font-style: inherit; + font-size: 100%; + font-family: inherit; + vertical-align: baseline; } /* Set baseline grid to 16 pt. */ @@ -54,6 +54,11 @@ } #footer { + margin: 1em 3em; + } + +#footer .content { + padding: 0; font-size: 85%; font-family: verdana, sans-serif; color: #666666; @@ -65,7 +70,7 @@ } /* Header styles */ -.content { +#header .content { padding: 1em 3em; } @@ -84,9 +89,14 @@ cursor: pointer; border-color: #999 #ccc #ccc #999; } -.stats span.hide { +.stats span.hide_run, .stats span.hide_exc, +.stats span.hide_mis, .stats span.hide_par, +.stats span.par.hide_run.hide_par { border-color: #ccc #999 #999 #ccc; } +.stats span.par.hide_run { + border-color: #999 #ccc #ccc #999; +} /* Source file styles */ .linenos p { @@ -95,7 +105,7 @@ padding: 0 .5em; color: #999999; font-family: verdana, sans-serif; - font-size: .625em; /* 10/16 */ + font-size: .625em; /* 10/16 */ line-height: 1.6em; /* 16/10 */ } td.text { @@ -112,7 +122,7 @@ background: #ffdddd; border-left: 2px solid #ff0000; } -.text p.run { +.text p.run, .text p.run.hide_par { background: #ddffdd; border-left: 2px solid #00ff00; } @@ -120,26 +130,69 @@ background: #eeeeee; border-left: 2px solid #808080; } -.text p.hide { +.text p.par, .text p.par.hide_run { + background: #ffffaa; + border-left: 2px solid #eeee99; + } +.text p.hide_run, .text p.hide_exc, .text p.hide_mis, .text p.hide_par, +.text p.hide_run.hide_par { background: inherit; } +.text span.annotate { + font-family: georgia; + font-style: italic; + color: #666; + float: right; + padding-right: .5em; + } +.text p.hide_par span.annotate { + display: none; + } + +/* Syntax coloring */ +.text .com { + color: green; + font-style: italic; + line-height: 1px; + } +.text .key { + font-weight: bold; + line-height: 1px; + } +.text .str { + color: #000080; + } + /* index styles */ #index td, #index th { text-align: right; - width: 6em; - padding: .25em 0; + width: 5em; + padding: .25em .5em; border-bottom: 1px solid #eee; } #index th { font-style: italic; color: #333; border-bottom: 1px solid #ccc; + cursor: pointer; + } +#index th:hover { + background: #eee; + border-bottom: 1px solid #999; + } +#index td.left, #index th.left { + padding-left: 0; + } +#index td.right, #index th.right { + padding-right: 0; + } +#index th.headerSortDown, #index th.headerSortUp { + border-bottom: 1px solid #000; } #index td.name, #index th.name { text-align: left; width: auto; - height: 1.5em; } #index td.name a { text-decoration: none; @@ -150,10 +203,12 @@ color: #000; } #index tr.total { - font-weight: bold; } #index tr.total td { - padding: .25em 0; + font-weight: bold; border-top: 1px solid #ccc; border-bottom: none; } +#index tr.file:hover { + background: #eeeeee; + }
--- a/DebugClients/Python/coverage/misc.py Thu Jan 07 13:42:05 2010 +0000 +++ b/DebugClients/Python/coverage/misc.py Thu Jan 07 13:42:51 2010 +0000 @@ -2,10 +2,10 @@ def nice_pair(pair): """Make a nice string representation of a pair of numbers. - + If the numbers are equal, just return the number, otherwise return the pair with a dash between them, indicating the range. - + """ start, end = pair if start == end: @@ -20,32 +20,50 @@ Format a list of line numbers for printing by coalescing groups of lines as long as the lines represent consecutive statements. This will coalesce even if there are gaps between statements. - + For example, if `statements` is [1,2,3,4,5,10,11,12,13,14] and `lines` is [1,2,5,10,11,13,14] then the result will be "1-2, 5-11, 13-14". - + """ pairs = [] i = 0 j = 0 start = None - pairs = [] while i < len(statements) and j < len(lines): if statements[i] == lines[j]: if start == None: start = lines[j] end = lines[j] - j = j + 1 + j += 1 elif start: pairs.append((start, end)) start = None - i = i + 1 + i += 1 if start: pairs.append((start, end)) ret = ', '.join(map(nice_pair, pairs)) return ret +def expensive(fn): + """A decorator to cache the result of an expensive operation. + + Only applies to methods with no arguments. + + """ + attr = "_cache_" + fn.__name__ + def _wrapped(self): + """Inner fn that checks the cache.""" + if not hasattr(self, attr): + setattr(self, attr, fn(self)) + return getattr(self, attr) + return _wrapped + + class CoverageException(Exception): """An exception specific to Coverage.""" pass + +class NoSource(CoverageException): + """Used to indicate we couldn't find the source for a module.""" + pass
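The new `expensive` decorator memoizes a no-argument method on the instance, under an attribute named _cache_<method>. Because coverage.py still supports Python 2.3, which lacks @-decorator syntax, parser.py below applies it by rebinding, `fn = expensive(fn)`. A tiny illustration (the class is invented):

    from coverage.misc import expensive

    class Demo(object):
        def answer(self):
            print("computing...")       # printed only on the first call
            return 42
        answer = expensive(answer)      # 2.3-compatible decoration, as parser.py does

    d = Demo()
    d.answer()   # computes and stores the result in d._cache_answer
    d.answer()   # returns the cached value without recomputing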
--- a/DebugClients/Python/coverage/parser.py Thu Jan 07 13:42:05 2010 +0000 +++ b/DebugClients/Python/coverage/parser.py Thu Jan 07 13:42:51 2010 +0000 @@ -1,93 +1,83 @@ """Code parsing for Coverage.""" -import re, token, tokenize, types -import cStringIO as StringIO +import glob, opcode, os, re, sys, token, tokenize + +from coverage.backward import set, sorted, StringIO # pylint: disable-msg=W0622 +from coverage.bytecode import ByteCodes, CodeObjects +from coverage.misc import nice_pair, CoverageException, NoSource, expensive + + +class CodeParser(object): + """Parse code to find executable lines, excluded lines, etc.""" + + def __init__(self, text=None, filename=None, exclude=None): + """ + Source can be provided as `text`, the text itself, or `filename`, from + which text will be read. Excluded lines are those that match + `exclude`, a regex. -from misc import nice_pair, CoverageException -from backward import set # pylint: disable-msg=W0622 - + """ + assert text or filename, "CodeParser needs either text or filename" + self.filename = filename or "<code>" + self.text = text + if not self.text: + try: + sourcef = open(self.filename, 'rU') + self.text = sourcef.read() + sourcef.close() + except IOError: + _, err, _ = sys.exc_info() + raise NoSource( + "No source for code: %r: %s" % (self.filename, err) + ) + self.text = self.text.replace('\r\n', '\n') -class CodeParser: - """Parse code to find executable lines, excluded lines, etc.""" - - def __init__(self, show_tokens=False): - self.show_tokens = show_tokens + self.exclude = exclude + + self.show_tokens = False # The text lines of the parsed code. - self.lines = None + self.lines = self.text.split('\n') # The line numbers of excluded lines of code. self.excluded = set() - + # The line numbers of docstring lines. self.docstrings = set() - + + # The line numbers of class definitions. + self.classdefs = set() + # A dict mapping line numbers to (lo,hi) for multi-line statements. self.multiline = {} - + # The line numbers that start statements. self.statement_starts = set() - def find_statement_starts(self, code): - """Find the starts of statements in compiled code. - - Uses co_lnotab described in Python/compile.c to find line numbers that - start statements, adding them to `self.statement_starts`. - - """ - # Adapted from dis.py in the standard library. - byte_increments = [ord(c) for c in code.co_lnotab[0::2]] - line_increments = [ord(c) for c in code.co_lnotab[1::2]] - - last_line_num = None - line_num = code.co_firstlineno - for byte_incr, line_incr in zip(byte_increments, line_increments): - if byte_incr: - if line_num != last_line_num: - self.statement_starts.add(line_num) - last_line_num = line_num - line_num += line_incr - if line_num != last_line_num: - self.statement_starts.add(line_num) + # Lazily-created ByteParser + self._byte_parser = None - def find_statements(self, code): - """Find the statements in `code`. - - Update `self.statement_starts`, a set of line numbers that start - statements. Recurses into all code objects reachable from `code`. - - """ - # Adapted from trace.py in the standard library. + def _get_byte_parser(self): + """Create a ByteParser on demand.""" + if not self._byte_parser: + self._byte_parser = \ + ByteParser(text=self.text, filename=self.filename) + return self._byte_parser + byte_parser = property(_get_byte_parser) - # Get all of the lineno information from this code. - self.find_statement_starts(code) - - # Check the constants for references to other code objects. 
- for c in code.co_consts: - if isinstance(c, types.CodeType): - # Found another code object, so recurse into it. - self.find_statements(c) + def _raw_parse(self): + """Parse the source to find the interesting facts about its lines. - def raw_parse(self, text=None, filename=None, exclude=None): - """Parse `text` to find the interesting facts about its lines. - A handful of member fields are updated. - + """ - if not text: - sourcef = open(filename, 'rU') - text = sourcef.read() - sourcef.close() - text = text.replace('\r\n', '\n') - self.lines = text.split('\n') - # Find lines which match an exclusion pattern. - if exclude: - re_exclude = re.compile(exclude) + if self.exclude: + re_exclude = re.compile(self.exclude) for i, ltext in enumerate(self.lines): if re_exclude.search(ltext): self.excluded.add(i+1) - + # Tokenize, to find excluded suites, to find docstrings, and to find # multi-line statements. indent = 0 @@ -96,17 +86,22 @@ prev_toktype = token.INDENT first_line = None - tokgen = tokenize.generate_tokens(StringIO.StringIO(text).readline) + tokgen = tokenize.generate_tokens(StringIO(self.text).readline) for toktype, ttext, (slineno, _), (elineno, _), ltext in tokgen: - if self.show_tokens: - print "%10s %5s %-20r %r" % ( + if self.show_tokens: # pragma: no cover + print("%10s %5s %-20r %r" % ( tokenize.tok_name.get(toktype, toktype), nice_pair((slineno, elineno)), ttext, ltext - ) + )) if toktype == token.INDENT: indent += 1 elif toktype == token.DEDENT: indent -= 1 + elif toktype == token.NAME and ttext == 'class': + # Class definitions look like branches in the byte code, so + # we need to exclude them. The simplest way is to note the + # lines with the 'class' keyword. + self.classdefs.add(slineno) elif toktype == token.OP and ttext == ':': if not excluding and elineno in self.excluded: # Start excluding a suite. We trigger off of the colon @@ -116,8 +111,10 @@ excluding = True elif toktype == token.STRING and prev_toktype == token.INDENT: # Strings that are first on an indented line are docstrings. - # (a trick from trace.py in the stdlib.) - for i in xrange(slineno, elineno+1): + # (a trick from trace.py in the stdlib.) This works for + # 99.9999% of cases. For the rest (!) see: + # http://stackoverflow.com/questions/1769332/x/1769794#1769794 + for i in range(slineno, elineno+1): self.docstrings.add(i) elif toktype == token.NEWLINE: if first_line is not None and elineno != first_line: @@ -125,10 +122,10 @@ # different line than the first line of the statement, # so record a multi-line range. rng = (first_line, elineno) - for l in xrange(first_line, elineno+1): + for l in range(first_line, elineno+1): self.multiline[l] = rng first_line = None - + if ttext.strip() and toktype != tokenize.COMMENT: # A non-whitespace token. if first_line is None: @@ -140,86 +137,608 @@ excluding = False if excluding: self.excluded.add(elineno) - + prev_toktype = toktype # Find the starts of the executable statements. - filename = filename or "<code>" - try: - # Python 2.3 and 2.4 don't like partial last lines, so be sure the - # text ends nicely for them. 
- text += '\n' - code = compile(text, filename, "exec") - except SyntaxError, synerr: - raise CoverageException( - "Couldn't parse '%s' as Python source: '%s' at line %d" % - (filename, synerr.msg, synerr.lineno) - ) + self.statement_starts.update(self.byte_parser._find_statements()) - self.find_statements(code) + def first_line(self, line): + """Return the first line number of the statement including `line`.""" + rng = self.multiline.get(line) + if rng: + first_line = rng[0] + else: + first_line = line + return first_line - def map_to_first_line(self, lines, ignore=None): + def first_lines(self, lines, ignore=None): """Map the line numbers in `lines` to the correct first line of the statement. - + Skip any line mentioned in `ignore`. - + Returns a sorted list of the first lines. - + """ ignore = ignore or [] lset = set() for l in lines: if l in ignore: continue - rng = self.multiline.get(l) - if rng: - new_l = rng[0] - else: - new_l = l + new_l = self.first_line(l) if new_l not in ignore: lset.add(new_l) - lines = list(lset) - lines.sort() - return lines - - def parse_source(self, text=None, filename=None, exclude=None): + return sorted(lset) + + def parse_source(self): """Parse source text to find executable lines, excluded lines, etc. - - Source can be provided as `text`, the text itself, or `filename`, from - which text will be read. Excluded lines are those that match `exclude`, - a regex. - - Return values are 1) a sorted list of executable line numbers, - 2) a sorted list of excluded line numbers, and 3) a dict mapping line - numbers to pairs (lo,hi) for multi-line statements. - + + Return values are 1) a sorted list of executable line numbers, and + 2) a sorted list of excluded line numbers. + + Reported line numbers are normalized to the first line of multi-line + statements. + + """ + self._raw_parse() + + excluded_lines = self.first_lines(self.excluded) + ignore = excluded_lines + list(self.docstrings) + lines = self.first_lines(self.statement_starts, ignore) + + return lines, excluded_lines + + def arcs(self): + """Get information about the arcs available in the code. + + Returns a sorted list of line number pairs. Line numbers have been + normalized to the first line of multiline statements. + + """ + all_arcs = [] + for l1, l2 in self.byte_parser._all_arcs(): + fl1 = self.first_line(l1) + fl2 = self.first_line(l2) + if fl1 != fl2: + all_arcs.append((fl1, fl2)) + return sorted(all_arcs) + arcs = expensive(arcs) + + def exit_counts(self): + """Get a mapping from line numbers to count of exits from that line. + + Excluded lines are excluded. + + """ + excluded_lines = self.first_lines(self.excluded) + exit_counts = {} + for l1, l2 in self.arcs(): + if l1 == -1: + # Don't ever report -1 as a line number + continue + if l1 in excluded_lines: + # Don't report excluded lines as line numbers. + continue + if l2 in excluded_lines: + # Arcs to excluded lines shouldn't count. + continue + if l1 not in exit_counts: + exit_counts[l1] = 0 + exit_counts[l1] += 1 + + # Class definitions have one extra exit, so remove one for each: + for l in self.classdefs: + # Ensure key is there: classdefs can include excluded lines. + if l in exit_counts: + exit_counts[l] -= 1 + + return exit_counts + exit_counts = expensive(exit_counts) + + +## Opcodes that guide the ByteParser. 
+ +def _opcode(name): + """Return the opcode by name from the opcode module.""" + return opcode.opmap[name] + +def _opcode_set(*names): + """Return a set of opcodes by the names in `names`.""" + return set([_opcode(name) for name in names]) + +# Opcodes that leave the code object. +OPS_CODE_END = _opcode_set('RETURN_VALUE') + +# Opcodes that unconditionally end the code chunk. +OPS_CHUNK_END = _opcode_set( + 'JUMP_ABSOLUTE', 'JUMP_FORWARD', 'RETURN_VALUE', 'RAISE_VARARGS', + 'BREAK_LOOP', 'CONTINUE_LOOP', + ) + +# Opcodes that push a block on the block stack. +OPS_PUSH_BLOCK = _opcode_set('SETUP_LOOP', 'SETUP_EXCEPT', 'SETUP_FINALLY') + +# Block types for exception handling. +OPS_EXCEPT_BLOCKS = _opcode_set('SETUP_EXCEPT', 'SETUP_FINALLY') + +# Opcodes that pop a block from the block stack. +OPS_POP_BLOCK = _opcode_set('POP_BLOCK') + +# Opcodes that have a jump destination, but aren't really a jump. +OPS_NO_JUMP = _opcode_set('SETUP_EXCEPT', 'SETUP_FINALLY') + +# Individual opcodes we need below. +OP_BREAK_LOOP = _opcode('BREAK_LOOP') +OP_END_FINALLY = _opcode('END_FINALLY') +OP_COMPARE_OP = _opcode('COMPARE_OP') +COMPARE_EXCEPTION = 10 # just have to get this const from the code. +OP_LOAD_CONST = _opcode('LOAD_CONST') +OP_RETURN_VALUE = _opcode('RETURN_VALUE') + + +class ByteParser(object): + """Parse byte codes to understand the structure of code.""" + + def __init__(self, code=None, text=None, filename=None): + if code: + self.code = code + else: + if not text: + assert filename, "If no code or text, need a filename" + sourcef = open(filename, 'rU') + text = sourcef.read() + sourcef.close() + + try: + # Python 2.3 and 2.4 don't like partial last lines, so be sure + # the text ends nicely for them. + self.code = compile(text + '\n', filename, "exec") + except SyntaxError: + _, synerr, _ = sys.exc_info() + raise CoverageException( + "Couldn't parse '%s' as Python source: '%s' at line %d" % + (filename, synerr.msg, synerr.lineno) + ) + + def child_parsers(self): + """Iterate over all the code objects nested within this one. + + The iteration includes `self` as its first value. + """ - self.raw_parse(text, filename, exclude) - - excluded_lines = self.map_to_first_line(self.excluded) - ignore = excluded_lines + list(self.docstrings) - lines = self.map_to_first_line(self.statement_starts, ignore) - - return lines, excluded_lines, self.multiline + return map(lambda c: ByteParser(code=c), CodeObjects(self.code)) + + # Getting numbers from the lnotab value changed in Py3.0. + if sys.hexversion >= 0x03000000: + def _lnotab_increments(self, lnotab): + """Return a list of ints from the lnotab bytes in 3.x""" + return list(lnotab) + else: + def _lnotab_increments(self, lnotab): + """Return a list of ints from the lnotab string in 2.x""" + return [ord(c) for c in lnotab] + + def _bytes_lines(self): + """Map byte offsets to line numbers in `code`. + + Uses co_lnotab described in Python/compile.c to map byte offsets to + line numbers. Returns a list: [(b0, l0), (b1, l1), ...] + + """ + # Adapted from dis.py in the standard library. 
+ byte_increments = self._lnotab_increments(self.code.co_lnotab[0::2]) + line_increments = self._lnotab_increments(self.code.co_lnotab[1::2]) + + bytes_lines = [] + last_line_num = None + line_num = self.code.co_firstlineno + byte_num = 0 + for byte_incr, line_incr in zip(byte_increments, line_increments): + if byte_incr: + if line_num != last_line_num: + bytes_lines.append((byte_num, line_num)) + last_line_num = line_num + byte_num += byte_incr + line_num += line_incr + if line_num != last_line_num: + bytes_lines.append((byte_num, line_num)) + return bytes_lines + + def _find_statements(self): + """Find the statements in `self.code`. + + Return a set of line numbers that start statements. Recurses into all + code objects reachable from `self.code`. + + """ + stmts = set() + for bp in self.child_parsers(): + # Get all of the lineno information from this code. + for _, l in bp._bytes_lines(): + stmts.add(l) + return stmts + + def _disassemble(self): # pragma: no cover + """Disassemble code, for ad-hoc experimenting.""" + + import dis + + for bp in self.child_parsers(): + print("\n%s: " % bp.code) + dis.dis(bp.code) + print("Bytes lines: %r" % bp._bytes_lines()) + + print("") + + def _split_into_chunks(self): + """Split the code object into a list of `Chunk` objects. + + Each chunk is only entered at its first instruction, though there can + be many exits from a chunk. + + Returns a list of `Chunk` objects. - def print_parse_results(self): - """Print the results of the parsing.""" - for i, ltext in enumerate(self.lines): - lineno = i+1 - m0 = m1 = m2 = ' ' - if lineno in self.statement_starts: - m0 = '-' - if lineno in self.docstrings: - m1 = '"' - if lineno in self.excluded: - m2 = 'x' - print "%4d %s%s%s %s" % (lineno, m0, m1, m2, ltext) + """ + + # The list of chunks so far, and the one we're working on. + chunks = [] + chunk = None + bytes_lines_map = dict(self._bytes_lines()) + + # The block stack: loops and try blocks get pushed here for the + # implicit jumps that can occur. + # Each entry is a tuple: (block type, destination) + block_stack = [] + + # Some op codes are followed by branches that should be ignored. This + # is a count of how many ignores are left. + ignore_branch = 0 + + # We have to handle the last two bytecodes specially. + ult = penult = None + + for bc in ByteCodes(self.code.co_code): + # Maybe have to start a new block + if bc.offset in bytes_lines_map: + if chunk: + chunk.exits.add(bc.offset) + chunk = Chunk(bc.offset, bytes_lines_map[bc.offset]) + chunks.append(chunk) + + if not chunk: + chunk = Chunk(bc.offset) + chunks.append(chunk) + + # Look at the opcode + if bc.jump_to >= 0 and bc.op not in OPS_NO_JUMP: + if ignore_branch: + # Someone earlier wanted us to ignore this branch. + ignore_branch -= 1 + else: + # The opcode has a jump, it's an exit for this chunk. + chunk.exits.add(bc.jump_to) + + if bc.op in OPS_CODE_END: + # The opcode can exit the code object. + chunk.exits.add(-1) + if bc.op in OPS_PUSH_BLOCK: + # The opcode adds a block to the block_stack. + block_stack.append((bc.op, bc.jump_to)) + if bc.op in OPS_POP_BLOCK: + # The opcode pops a block from the block stack. + block_stack.pop() + if bc.op in OPS_CHUNK_END: + # This opcode forces the end of the chunk. + if bc.op == OP_BREAK_LOOP: + # A break is implicit: jump where the top of the + # block_stack points. 
+ chunk.exits.add(block_stack[-1][1]) + chunk = None + if bc.op == OP_END_FINALLY: + if block_stack: + # A break that goes through a finally will jump to whatever + # block is on top of the stack. + chunk.exits.add(block_stack[-1][1]) + # For the finally clause we need to find the closest exception + # block, and use its jump target as an exit. + for iblock in range(len(block_stack)-1, -1, -1): + if block_stack[iblock][0] in OPS_EXCEPT_BLOCKS: + chunk.exits.add(block_stack[iblock][1]) + break + if bc.op == OP_COMPARE_OP and bc.arg == COMPARE_EXCEPTION: + # This is an except clause. We want to overlook the next + # branch, so that except's don't count as branches. + ignore_branch += 1 + + penult = ult + ult = bc + if chunks: + # The last two bytecodes could be a dummy "return None" that + # shouldn't be counted as real code. Every Python code object seems + # to end with a return, and a "return None" is inserted if there + # isn't an explicit return in the source. + if ult and penult: + if penult.op == OP_LOAD_CONST and ult.op == OP_RETURN_VALUE: + if self.code.co_consts[penult.arg] is None: + # This is "return None", but is it dummy? A real line + # would be a last chunk all by itself. + if chunks[-1].byte != penult.offset: + # Split the last chunk + last_chunk = chunks[-1] + last_chunk.exits.remove(-1) + last_chunk.exits.add(penult.offset) + chunk = Chunk(penult.offset) + chunk.exits.add(-1) + chunks.append(chunk) + + # Give all the chunks a length. + chunks[-1].length = bc.next_offset - chunks[-1].byte + for i in range(len(chunks)-1): + chunks[i].length = chunks[i+1].byte - chunks[i].byte + + return chunks + + def _arcs(self): + """Find the executable arcs in the code. + + Returns a set of pairs, (from,to). From and to are integer line + numbers. If from is -1, then the arc is an entrance into the code + object. If to is -1, the arc is an exit from the code object. + + """ + chunks = self._split_into_chunks() + + # A map from byte offsets to chunks jumped into. + byte_chunks = dict([(c.byte, c) for c in chunks]) + + # Build a map from byte offsets to actual lines reached. + byte_lines = {-1:[-1]} + bytes_to_add = set([c.byte for c in chunks]) + + while bytes_to_add: + byte_to_add = bytes_to_add.pop() + if byte_to_add in byte_lines or byte_to_add == -1: + continue + + # Which lines does this chunk lead to? + bytes_considered = set() + bytes_to_consider = [byte_to_add] + lines = set() + + while bytes_to_consider: + byte = bytes_to_consider.pop() + bytes_considered.add(byte) + + # Find chunk for byte + try: + ch = byte_chunks[byte] + except KeyError: + for ch in chunks: + if ch.byte <= byte < ch.byte+ch.length: + break + else: + # No chunk for this byte! + raise Exception("Couldn't find chunk @ %d" % byte) + byte_chunks[byte] = ch + + if ch.line: + lines.add(ch.line) + else: + for ex in ch.exits: + if ex == -1: + lines.add(-1) + elif ex not in bytes_considered: + bytes_to_consider.append(ex) + + bytes_to_add.update(ch.exits) + + byte_lines[byte_to_add] = lines + + # Figure out for each chunk where the exits go. + arcs = set() + for chunk in chunks: + if chunk.line: + for ex in chunk.exits: + for exit_line in byte_lines[ex]: + if chunk.line != exit_line: + arcs.add((chunk.line, exit_line)) + for line in byte_lines[0]: + arcs.add((-1, line)) + + return arcs + + def _all_chunks(self): + """Returns a list of `Chunk` objects for this code and its children. + + See `_split_into_chunks` for details. 
+ + """ + chunks = [] + for bp in self.child_parsers(): + chunks.extend(bp._split_into_chunks()) + + return chunks + + def _all_arcs(self): + """Get the set of all arcs in this code object and its children. + + See `_arcs` for details. + + """ + arcs = set() + for bp in self.child_parsers(): + arcs.update(bp._arcs()) + + return arcs + + +class Chunk(object): + """A sequence of bytecodes with a single entrance. + + To analyze byte code, we have to divide it into chunks, sequences of byte + codes such that each basic block has only one entrance, the first + instruction in the block. + + This is almost the CS concept of `basic block`_, except that we're willing + to have many exits from a chunk, and "basic block" is a more cumbersome + term. + + .. _basic block: http://en.wikipedia.org/wiki/Basic_block + + An exit of -1 means the chunk can leave the code (return). + + """ + def __init__(self, byte, line=0): + self.byte = byte + self.line = line + self.length = 0 + self.exits = set() + + def __repr__(self): + return "<%d+%d @%d %r>" % ( + self.byte, self.length, self.line, list(self.exits) + ) + + +class AdHocMain(object): # pragma: no cover + """An ad-hoc main for code parsing experiments.""" + + def main(self, args): + """A main function for trying the code from the command line.""" + + from optparse import OptionParser + + parser = OptionParser() + parser.add_option( + "-c", action="store_true", dest="chunks", + help="Show basic block chunks" + ) + parser.add_option( + "-d", action="store_true", dest="dis", + help="Disassemble" + ) + parser.add_option( + "-R", action="store_true", dest="recursive", + help="Recurse to find source files" + ) + parser.add_option( + "-s", action="store_true", dest="source", + help="Show analyzed source" + ) + parser.add_option( + "-t", action="store_true", dest="tokens", + help="Show tokens" + ) + + options, args = parser.parse_args() + if options.recursive: + if args: + root = args[0] + else: + root = "." + for root, _, _ in os.walk(root): + for f in glob.glob(root + "/*.py"): + self.adhoc_one_file(options, f) + else: + self.adhoc_one_file(options, args[0]) + + def adhoc_one_file(self, options, filename): + """Process just one file.""" + + if options.dis or options.chunks: + try: + bp = ByteParser(filename=filename) + except CoverageException: + _, err, _ = sys.exc_info() + print("%s" % (err,)) + return + + if options.dis: + print("Main code:") + bp._disassemble() + + if options.chunks: + chunks = bp._all_chunks() + if options.recursive: + print("%6d: %s" % (len(chunks), filename)) + else: + print("Chunks: %r" % chunks) + arcs = bp._all_arcs() + print("Arcs: %r" % sorted(arcs)) + + if options.source or options.tokens: + cp = CodeParser(filename=filename, exclude=r"no\s*cover") + cp.show_tokens = options.tokens + cp._raw_parse() + + if options.source: + if options.chunks: + arc_width, arc_chars = self.arc_ascii_art(arcs) + else: + arc_width, arc_chars = 0, {} + + exit_counts = cp.exit_counts() + + for i, ltext in enumerate(cp.lines): + lineno = i+1 + m0 = m1 = m2 = m3 = a = ' ' + if lineno in cp.statement_starts: + m0 = '-' + exits = exit_counts.get(lineno, 0) + if exits > 1: + m1 = str(exits) + if lineno in cp.docstrings: + m2 = '"' + if lineno in cp.classdefs: + m2 = 'C' + if lineno in cp.excluded: + m3 = 'x' + a = arc_chars.get(lineno, '').ljust(arc_width) + print("%4d %s%s%s%s%s %s" % + (lineno, m0, m1, m2, m3, a, ltext) + ) + + def arc_ascii_art(self, arcs): + """Draw arcs as ascii art. 
+ + Returns a width of characters needed to draw all the arcs, and a + dictionary mapping line numbers to ascii strings to draw for that line. + + """ + arc_chars = {} + for lfrom, lto in sorted(arcs): + if lfrom == -1: + arc_chars[lto] = arc_chars.get(lto, '') + 'v' + elif lto == -1: + arc_chars[lfrom] = arc_chars.get(lfrom, '') + '^' + else: + if lfrom == lto-1: + # Don't show obvious arcs. + continue + if lfrom < lto: + l1, l2 = lfrom, lto + else: + l1, l2 = lto, lfrom + w = max([len(arc_chars.get(l, '')) for l in range(l1, l2+1)]) + for l in range(l1, l2+1): + if l == lfrom: + ch = '<' + elif l == lto: + ch = '>' + else: + ch = '|' + arc_chars[l] = arc_chars.get(l, '').ljust(w) + ch + arc_width = 0 + + if arc_chars: + arc_width = max([len(a) for a in arc_chars.values()]) + else: + arc_width = 0 + + return arc_width, arc_chars + if __name__ == '__main__': - import sys - - parser = CodeParser(show_tokens=True) - parser.raw_parse(filename=sys.argv[1], exclude=r"no\s*cover") - parser.print_parse_results() \ No newline at end of file + AdHocMain().main(sys.argv[1:])
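Editor's note: the reworked parser above is now driven through `parse_source()`, with `_raw_parse` as an internal step and statement finding delegated to `ByteParser`. A minimal sketch of using it by hand, assuming a local `example.py` to analyze (constructor arguments as in `adhoc_one_file` above)::

    from coverage.parser import CodeParser

    # Parse one file; the exclude regex matches "pragma: no cover" markers.
    cp = CodeParser(filename="example.py", exclude=r"no\s*cover")
    statements, excluded = cp.parse_source()
    print("Executable lines: %r" % statements)
    print("Excluded lines: %r" % excluded)
    # Lines with more than one exit are branch points.
    print("Exit counts: %r" % cp.exit_counts())

Note that `parse_source()` now returns just two values; multi-line statements are normalized to their first lines internally via `first_lines()` rather than exposed as a third return value.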
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/DebugClients/Python/coverage/phystokens.py Thu Jan 07 13:42:51 2010 +0000 @@ -0,0 +1,107 @@ +"""Better tokenizing for coverage.py.""" + +import keyword, re, token, tokenize +from coverage.backward import StringIO # pylint: disable-msg=W0622 + +def phys_tokens(toks): + """Return all physical tokens, even line continuations. + + tokenize.generate_tokens() doesn't return a token for the backslash that + continues lines. This wrapper provides those tokens so that we can + re-create a faithful representation of the original source. + + Returns the same values as generate_tokens() + + """ + last_line = None + last_lineno = -1 + last_ttype = None + for ttype, ttext, (slineno, scol), (elineno, ecol), ltext in toks: + if last_lineno != elineno: + if last_line and last_line[-2:] == "\\\n": + # We are at the beginning of a new line, and the last line + # ended with a backslash. We probably have to inject a + # backslash token into the stream. Unfortunately, there's more + # to figure out. This code:: + # + # usage = """\ + # HEY THERE + # """ + # + # triggers this condition, but the token text is:: + # + # '"""\\\nHEY THERE\n"""' + # + # so we need to figure out if the backslash is already in the + # string token or not. + inject_backslash = True + if last_ttype == tokenize.COMMENT: + # Comments like this \ + # should never result in a new token. + inject_backslash = False + elif ttype == token.STRING: + if "\n" in ttext and ttext.split('\n', 1)[0][-1] == '\\': + # It's a multiline string and the first line ends with + # a backslash, so we don't need to inject another. + inject_backslash = False + if inject_backslash: + # Figure out what column the backslash is in. + ccol = len(last_line.split("\n")[-2]) - 1 + # Yield the token, with a fake token type. + yield ( + 99999, "\\\n", + (slineno, ccol), (slineno, ccol+2), + last_line + ) + last_line = ltext + last_ttype = ttype + yield ttype, ttext, (slineno, scol), (elineno, ecol), ltext + last_lineno = elineno + + +def source_token_lines(source): + """Generate a series of lines, one for each line in `source`. + + Each line is a list of pairs, each pair is a token:: + + [('key', 'def'), ('ws', ' '), ('nam', 'hello'), ('op', '('), ... ] + + Each pair has a token class, and the token text. + + If you concatenate all the token texts, and then join them with newlines, + you should have your original `source` back, with two differences: + trailing whitespace is not preserved, and a final line with no newline + is indistinguishable from a final line with a newline. + + """ + ws_tokens = [token.INDENT, token.DEDENT, token.NEWLINE, tokenize.NL] + line = [] + col = 0 + tokgen = tokenize.generate_tokens(StringIO(source.expandtabs(8)).readline) + for ttype, ttext, (_, scol), (_, ecol), _ in phys_tokens(tokgen): + mark_start = True + for part in re.split('(\n)', ttext): + if part == '\n': + yield line + line = [] + col = 0 + mark_end = False + elif part == '': + mark_end = False + elif ttype in ws_tokens: + mark_end = False + else: + if mark_start and scol > col: + line.append(("ws", " " * (scol - col))) + mark_start = False + tok_class = tokenize.tok_name.get(ttype, 'xx').lower()[:3] + if ttype == token.NAME and keyword.iskeyword(ttext): + tok_class = "key" + line.append((tok_class, part)) + mark_end = True + scol = 0 + if mark_end: + col = ecol + + if line: + yield line
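Editor's note: `source_token_lines` is the building block for syntax-aware reporting; each generated line is a list of (class, text) pairs, as the docstring describes. A quick sketch of what it yields for a short snippet::

    from coverage.phystokens import source_token_lines

    source = "def hello(name):\n    return 'hi ' + name  # greet\n"
    for line in source_token_lines(source):
        print(line)
    # First line: [('key', 'def'), ('ws', ' '), ('nam', 'hello'),
    #              ('op', '('), ('nam', 'name'), ('op', ')'), ('op', ':')]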
--- a/DebugClients/Python/coverage/report.py Thu Jan 07 13:42:05 2010 +0000 +++ b/DebugClients/Python/coverage/report.py Thu Jan 07 13:42:51 2010 +0000 @@ -1,34 +1,35 @@ """Reporter foundation for Coverage.""" import os -from codeunit import code_unit_factory +from coverage.codeunit import code_unit_factory +from coverage.misc import CoverageException, NoSource class Reporter(object): """A base class for all reporters.""" - + def __init__(self, coverage, ignore_errors=False): """Create a reporter. - + `coverage` is the coverage instance. `ignore_errors` controls how skittish the reporter will be during file processing. """ self.coverage = coverage self.ignore_errors = ignore_errors - + # The code units to report on. Set by find_code_units. self.code_units = [] - + # The directory into which to place the report, used by some derived # classes. self.directory = None def find_code_units(self, morfs, omit_prefixes): """Find the code units we'll report on. - + `morfs` is a list of modules or filenames. `omit_prefixes` is a list of prefixes to leave out of the list. - + """ morfs = morfs or self.coverage.data.executed_files() self.code_units = code_unit_factory( @@ -38,24 +39,22 @@ def report_files(self, report_fn, morfs, directory=None, omit_prefixes=None): """Run a reporting function on a number of morfs. - + `report_fn` is called for each relative morf in `morfs`. - + """ self.find_code_units(morfs, omit_prefixes) + if not self.code_units: + raise CoverageException("No data to report.") + self.directory = directory if self.directory and not os.path.exists(self.directory): os.makedirs(self.directory) for cu in self.code_units: try: - if not cu.relative: - continue - statements, excluded, missing, _ = self.coverage._analyze(cu) - report_fn(cu, statements, excluded, missing) - except KeyboardInterrupt: - raise - except: + report_fn(cu, self.coverage._analyze(cu)) + except NoSource: if not self.ignore_errors: raise
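Editor's note: the `report_files` contract changes here, so the per-file callback now receives a single analysis object rather than the unpacked (statements, excluded, missing) triple. A toy reporter against the new contract; the class and its output format are illustrative only, not part of coverage.py::

    from coverage.report import Reporter

    class CountReporter(Reporter):
        """Hypothetical reporter: print statement counts per file."""
        def report(self, morfs, directory=None, omit_prefixes=None):
            self.report_files(self.count_file, morfs, directory, omit_prefixes)
        def count_file(self, cu, analysis):
            # `analysis` is the Analysis object built in results.py below.
            print("%-30s %4d statements, %4d missing" % (
                cu.name, len(analysis.statements), len(analysis.missing)
            ))

    # Usage sketch: CountReporter(cov).report(None), where cov is a
    # coverage() instance holding measured data.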
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/DebugClients/Python/coverage/results.py Thu Jan 07 13:42:51 2010 +0000 @@ -0,0 +1,175 @@ +"""Results of coverage measurement.""" + +import os + +from coverage.backward import set, sorted # pylint: disable-msg=W0622 +from coverage.misc import format_lines, NoSource +from coverage.parser import CodeParser + + +class Analysis(object): + """The results of analyzing a code unit.""" + + def __init__(self, cov, code_unit): + self.coverage = cov + self.code_unit = code_unit + + self.filename = self.code_unit.filename + ext = os.path.splitext(self.filename)[1] + source = None + if ext == '.py': + if not os.path.exists(self.filename): + source = self.coverage.file_locator.get_zip_data(self.filename) + if not source: + raise NoSource("No source for code: %r" % self.filename) + + self.parser = CodeParser( + text=source, filename=self.filename, + exclude=self.coverage.exclude_re + ) + self.statements, self.excluded = self.parser.parse_source() + + # Identify missing statements. + executed = self.coverage.data.executed_lines(self.filename) + exec1 = self.parser.first_lines(executed) + self.missing = sorted(set(self.statements) - set(exec1)) + + if self.coverage.data.has_arcs(): + n_branches = self.total_branches() + mba = self.missing_branch_arcs() + n_missing_branches = sum([len(v) for v in mba.values()]) + else: + n_branches = n_missing_branches = 0 + + self.numbers = Numbers( + n_files=1, + n_statements=len(self.statements), + n_excluded=len(self.excluded), + n_missing=len(self.missing), + n_branches=n_branches, + n_missing_branches=n_missing_branches, + ) + + def missing_formatted(self): + """The missing line numbers, formatted nicely. + + Returns a string like "1-2, 5-11, 13-14". + + """ + return format_lines(self.statements, self.missing) + + def has_arcs(self): + """Were arcs measured in this result?""" + return self.coverage.data.has_arcs() + + def arc_possibilities(self): + """Returns a sorted list of the arcs in the code.""" + return self.parser.arcs() + + def arcs_executed(self): + """Returns a sorted list of the arcs actually executed in the code.""" + executed = self.coverage.data.executed_arcs(self.filename) + m2fl = self.parser.first_line + executed = [(m2fl(l1), m2fl(l2)) for (l1,l2) in executed] + return sorted(executed) + + def arcs_missing(self): + """Returns a sorted list of the arcs in the code not executed.""" + possible = self.arc_possibilities() + executed = self.arcs_executed() + missing = [p for p in possible if p not in executed] + return sorted(missing) + + def arcs_unpredicted(self): + """Returns a sorted list of the executed arcs missing from the code.""" + possible = self.arc_possibilities() + executed = self.arcs_executed() + # Exclude arcs here which connect a line to itself. They can occur + # in executed data in some cases. This is where they can cause + # trouble, and here is where it's the least burden to remove them. + unpredicted = [ + e for e in executed + if e not in possible and e[0] != e[1] + ] + return sorted(unpredicted) + + def branch_lines(self): + """Returns lines that have more than one exit.""" + exit_counts = self.parser.exit_counts() + return [l1 for l1,count in exit_counts.items() if count > 1] + + def total_branches(self): + """How many total branches are there?""" + exit_counts = self.parser.exit_counts() + return sum([count for count in exit_counts.values() if count > 1]) + + def missing_branch_arcs(self): + """Return arcs that weren't executed from branch lines. 
+ + Returns {l1:[l2a,l2b,...], ...} + + """ + missing = self.arcs_missing() + branch_lines = set(self.branch_lines()) + mba = {} + for l1, l2 in missing: + if l1 in branch_lines: + if l1 not in mba: + mba[l1] = [] + mba[l1].append(l2) + return mba + + +class Numbers(object): + """The numerical results of measuring coverage. + + This holds the basic statistics from `Analysis`, and is used to roll + up statistics across files. + + """ + def __init__(self, n_files=0, n_statements=0, n_excluded=0, n_missing=0, + n_branches=0, n_missing_branches=0 + ): + self.n_files = n_files + self.n_statements = n_statements + self.n_excluded = n_excluded + self.n_missing = n_missing + self.n_branches = n_branches + self.n_missing_branches = n_missing_branches + + def _get_n_executed(self): + """Returns the number of executed statements.""" + return self.n_statements - self.n_missing + n_executed = property(_get_n_executed) + + def _get_n_executed_branches(self): + """Returns the number of executed branches.""" + return self.n_branches - self.n_missing_branches + n_executed_branches = property(_get_n_executed_branches) + + def _get_pc_covered(self): + """Returns a single percentage value for coverage.""" + if self.n_statements > 0: + pc_cov = (100.0 * (self.n_executed + self.n_executed_branches) / + (self.n_statements + self.n_branches)) + else: + pc_cov = 100.0 + return pc_cov + pc_covered = property(_get_pc_covered) + + def __add__(self, other): + nums = Numbers() + nums.n_files = self.n_files + other.n_files + nums.n_statements = self.n_statements + other.n_statements + nums.n_excluded = self.n_excluded + other.n_excluded + nums.n_missing = self.n_missing + other.n_missing + nums.n_branches = self.n_branches + other.n_branches + nums.n_missing_branches = (self.n_missing_branches + + other.n_missing_branches) + return nums + + def __radd__(self, other): + # Implementing 0+Numbers allows us to sum() a list of Numbers. + if other == 0: + return self + return NotImplemented
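Editor's note: `Numbers` exists so per-file statistics roll up by plain addition; `__radd__` is what lets a list of them go through `sum()`, which starts from 0. A small worked sketch with invented values::

    from coverage.results import Numbers

    a = Numbers(n_files=1, n_statements=10, n_missing=2)
    b = Numbers(n_files=1, n_statements=5, n_missing=0)
    total = sum([a, b])        # 0 + a uses __radd__, a + b uses __add__
    print(total.n_files)       # 2
    print(total.n_executed)    # 13 (15 statements, 2 missing)
    print("%.1f%%" % total.pc_covered)   # 86.7%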
--- a/DebugClients/Python/coverage/summary.py Thu Jan 07 13:42:05 2010 +0000 +++ b/DebugClients/Python/coverage/summary.py Thu Jan 07 13:42:51 2010 +0000 @@ -2,31 +2,40 @@ import sys -from report import Reporter +from coverage.report import Reporter +from coverage.results import Numbers class SummaryReporter(Reporter): """A reporter for writing the summary report.""" - + def __init__(self, coverage, show_missing=True, ignore_errors=False): super(SummaryReporter, self).__init__(coverage, ignore_errors) self.show_missing = show_missing + self.branches = coverage.data.has_arcs() def report(self, morfs, omit_prefixes=None, outfile=None): """Writes a report summarizing coverage statistics per module.""" - + self.find_code_units(morfs, omit_prefixes) # Prepare the formatting strings max_name = max([len(cu.name) for cu in self.code_units] + [5]) fmt_name = "%%- %ds " % max_name fmt_err = "%s %s: %s\n" - header = fmt_name % "Name" + " Stmts Exec Cover\n" - fmt_coverage = fmt_name + "% 6d % 6d % 5d%%\n" + header = (fmt_name % "Name") + " Stmts Exec" + fmt_coverage = fmt_name + "%6d %6d" + if self.branches: + header += " Branch BrExec" + fmt_coverage += " %6d %6d" + header += " Cover" + fmt_coverage += " %5d%%" if self.show_missing: - header = header.replace("\n", " Missing\n") - fmt_coverage = fmt_coverage.replace("\n", " %s\n") - rule = "-" * (len(header)-1) + "\n" + header += " Missing" + fmt_coverage += " %s" + rule = "-" * len(header) + "\n" + header += "\n" + fmt_coverage += "\n" if not outfile: outfile = sys.stdout @@ -35,26 +44,20 @@ outfile.write(header) outfile.write(rule) - total_statements = 0 - total_executed = 0 - total_units = 0 - + total = Numbers() + for cu in self.code_units: try: - statements, _, missing, readable = self.coverage._analyze(cu) - n = len(statements) - m = n - len(missing) - if n > 0: - pc = 100.0 * m / n - else: - pc = 100.0 - args = (cu.name, n, m, pc) + analysis = self.coverage._analyze(cu) + nums = analysis.numbers + args = (cu.name, nums.n_statements, nums.n_executed) + if self.branches: + args += (nums.n_branches, nums.n_executed_branches) + args += (nums.pc_covered,) if self.show_missing: - args = args + (readable,) + args += (analysis.missing_formatted(),) outfile.write(fmt_coverage % args) - total_units += 1 - total_statements = total_statements + n - total_executed = total_executed + m + total += nums except KeyboardInterrupt: #pragma: no cover raise except: @@ -62,13 +65,12 @@ typ, msg = sys.exc_info()[:2] outfile.write(fmt_err % (cu.name, typ.__name__, msg)) - if total_units > 1: + if total.n_files > 1: outfile.write(rule) - if total_statements > 0: - pc = 100.0 * total_executed / total_statements - else: - pc = 100.0 - args = ("TOTAL", total_statements, total_executed, pc) + args = ("TOTAL", total.n_statements, total.n_executed) + if self.branches: + args += (total.n_branches, total.n_executed_branches) + args += (total.pc_covered,) if self.show_missing: - args = args + ("",) - outfile.write(fmt_coverage % args) \ No newline at end of file + args += ("",) + outfile.write(fmt_coverage % args)
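Editor's note: the summary gains Branch and BrExec columns whenever the measured data has arcs. A sketch of producing such a report, assuming the `branch=True` constructor flag from this release and a hypothetical `mymodule` under measurement::

    from coverage import coverage

    cov = coverage(branch=True)    # branch measurement; flag assumed from 3.2
    cov.start()
    import mymodule                # hypothetical module under test
    mymodule.main()
    cov.stop()
    # Columns: Name  Stmts  Exec  Branch  BrExec  Cover  Missing
    cov.report(show_missing=True)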
--- a/DebugClients/Python/coverage/templite.py Thu Jan 07 13:42:05 2010 +0000 +++ b/DebugClients/Python/coverage/templite.py Thu Jan 07 13:42:51 2010 +0000 @@ -1,112 +1,166 @@ """A simple Python template renderer, for a nano-subset of Django syntax.""" -# Started from http://blog.ianbicking.org/templating-via-dict-wrappers.html -# and http://jtauber.com/2006/05/templates.html -# and http://code.activestate.com/recipes/496730/ +# Coincidentally named the same as http://code.activestate.com/recipes/496702/ -import re +import re, sys class Templite(object): """A simple template renderer, for a nano-subset of Django syntax. Supported constructs are extended variable access:: - + {{var.modifier.modifier|filter|filter}} - - and loops:: - + + loops:: + {% for var in list %}...{% endfor %} - + + and ifs:: + + {% if var %}...{% endif %} + + Comments are within curly-hash markers:: + + {# This will be ignored #} + Construct a Templite with the template text, then use `render` against a dictionary context to create a finished string. - + """ def __init__(self, text, *contexts): """Construct a Templite with the given `text`. - + `contexts` are dictionaries of values to use for future renderings. These are good for filters and global values. - + """ - self.loops = [] - self.text = self._prepare(text) + self.text = text self.context = {} for context in contexts: self.context.update(context) + # Split the text to form a list of tokens. + toks = re.split(r"(?s)({{.*?}}|{%.*?%}|{#.*?#})", text) + + # Parse the tokens into a nested list of operations. Each item in the + # list is a tuple with an opcode, and arguments. They'll be + # interpreted by _TempliteEngine. + # + # When parsing an action tag with nested content (if, for), the current + # ops list is pushed onto ops_stack, and the parsing continues in a new + # ops list that is part of the arguments to the if or for op. + ops = [] + ops_stack = [] + for tok in toks: + if tok.startswith('{{'): + # Expression: ('exp', expr) + ops.append(('exp', tok[2:-2].strip())) + elif tok.startswith('{#'): + # Comment: ignore it and move on. + continue + elif tok.startswith('{%'): + # Action tag: split into words and parse further. + words = tok[2:-2].strip().split() + if words[0] == 'if': + # If: ('if', (expr, body_ops)) + if_ops = [] + assert len(words) == 2 + ops.append(('if', (words[1], if_ops))) + ops_stack.append(ops) + ops = if_ops + elif words[0] == 'for': + # For: ('for', (varname, listexpr, body_ops)) + assert len(words) == 4 and words[2] == 'in' + for_ops = [] + ops.append(('for', (words[1], words[3], for_ops))) + ops_stack.append(ops) + ops = for_ops + elif words[0].startswith('end'): + # Endsomething. Pop the ops stack + ops = ops_stack.pop() + assert ops[-1][0] == words[0][3:] + else: + raise SyntaxError("Don't understand tag %r" % words) + else: + ops.append(('lit', tok)) + + assert not ops_stack, "Unmatched action tag: %r" % ops_stack[-1][0] + self.ops = ops + def render(self, context=None): """Render this template by applying it to `context`. - + `context` is a dictionary of values to use in this rendering. - + """ # Make the complete context we'll use. ctx = dict(self.context) if context: ctx.update(context) - - ctxaccess = _ContextAccess(ctx) - - # Render the loops. - for iloop, (loopvar, listvar, loopbody) in enumerate(self.loops): - result = "" - for listval in ctxaccess[listvar]: - ctx[loopvar] = listval - result += loopbody % ctxaccess - ctx["loop:%d" % iloop] = result - - # Render the final template.
- return self.text % ctxaccess - def _prepare(self, text): - """Convert Django-style data references into Python-native ones.""" - # Pull out loops. - text = re.sub( - r"(?s){% for ([a-z0-9_]+) in ([a-z0-9_.|]+) %}(.*?){% endfor %}", - self._loop_prepare, text - ) - # Protect actual percent signs in the text. - text = text.replace("%", "%%") - # Convert {{foo}} into %(foo)s - text = re.sub(r"{{([^}]+)}}", r"%(\1)s", text) - return text - - def _loop_prepare(self, match): - """Prepare a loop body for `_prepare`.""" - nloop = len(self.loops) - # Append (loopvar, listvar, loopbody) to self.loops - loopvar, listvar, loopbody = match.groups() - loopbody = self._prepare(loopbody) - self.loops.append((loopvar, listvar, loopbody)) - return "{{loop:%d}}" % nloop + # Run it through an engine, and return the result. + engine = _TempliteEngine(ctx) + engine.execute(self.ops) + return engine.result -class _ContextAccess(object): - """A mediator for a context. - - Implements __getitem__ on a context for Templite, so that string formatting - references can pull data from the context. - - """ +class _TempliteEngine(object): + """Executes Templite objects to produce strings.""" def __init__(self, context): self.context = context + self.result = "" - def __getitem__(self, key): - if "|" in key: - pipes = key.split("|") - value = self[pipes[0]] + def execute(self, ops): + """Execute `ops` in the engine. + + Called recursively for the bodies of if's and loops. + + """ + for op, args in ops: + if op == 'lit': + self.result += args + elif op == 'exp': + try: + self.result += str(self.evaluate(args)) + except: + exc_class, exc, _ = sys.exc_info() + new_exc = exc_class("Couldn't evaluate {{ %s }}: %s" + % (args, exc)) + raise new_exc + elif op == 'if': + expr, body = args + if self.evaluate(expr): + self.execute(body) + elif op == 'for': + var, lis, body = args + vals = self.evaluate(lis) + for val in vals: + self.context[var] = val + self.execute(body) + else: + raise AssertionError("TempliteEngine doesn't grok op %r" % op) + + def evaluate(self, expr): + """Evaluate an expression. + + `expr` can have pipes and dots to indicate data access and filtering. + + """ + if "|" in expr: + pipes = expr.split("|") + value = self.evaluate(pipes[0]) for func in pipes[1:]: - value = self[func](value) - elif "." in key: - dots = key.split('.') - value = self[dots[0]] + value = self.evaluate(func)(value) + elif "." in expr: + dots = expr.split('.') + value = self.evaluate(dots[0]) for dot in dots[1:]: try: value = getattr(value, dot) except AttributeError: value = value[dot] - if callable(value): + if hasattr(value, '__call__'): value = value() else: - value = self.context[key] + value = self.context[expr] return value
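Editor's note: the rewrite turns Templite into a two-phase engine; the constructor parses the text once into a nested ops list, and each `render` walks that list with a fresh context. Exercising all three supported constructs, with `str.upper` supplied as a filter::

    from coverage.templite import Templite

    templite = Templite(
        "Hello, {{name|upper}}!\n"
        "{% for topic in topics %}You like {{topic}}. {% endfor %}"
        "{% if punct %}!!!{% endif %}",
        {'upper': str.upper},
    )
    text = templite.render({'name': "Ned", 'topics': ['a', 'b'], 'punct': True})
    print(text)
    # Hello, NED!
    # You like a. You like b. !!!

One consequence of the split: malformed tags and unmatched end-tags now fail at construction time, via the asserts and SyntaxError in the parser, instead of surfacing during rendering.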
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/DebugClients/Python/coverage/xmlreport.py Thu Jan 07 13:42:51 2010 +0000 @@ -0,0 +1,146 @@ +"""XML reporting for coverage.py""" + +import os, sys, time +import xml.dom.minidom + +from coverage import __url__, __version__ +from coverage.backward import sorted # pylint: disable-msg=W0622 +from coverage.report import Reporter + +def rate(hit, num): + """Return the fraction of `hit`/`num`.""" + return hit / (num or 1.0) + + +class XmlReporter(Reporter): + """A reporter for writing Cobertura-style XML coverage results.""" + + def __init__(self, coverage, ignore_errors=False): + super(XmlReporter, self).__init__(coverage, ignore_errors) + + self.packages = None + self.xml_out = None + self.arcs = coverage.data.has_arcs() + + def report(self, morfs, omit_prefixes=None, outfile=None): + """Generate a Cobertura-compatible XML report for `morfs`. + + `morfs` is a list of modules or filenames. `omit_prefixes` is a list + of strings, prefixes of modules to omit from the report. + + """ + # Initial setup. + outfile = outfile or sys.stdout + + # Create the DOM that will store the data. + impl = xml.dom.minidom.getDOMImplementation() + docType = impl.createDocumentType( + "coverage", None, + "http://cobertura.sourceforge.net/xml/coverage-03.dtd" + ) + self.xml_out = impl.createDocument(None, "coverage", docType) + + # Write header stuff. + xcoverage = self.xml_out.documentElement + xcoverage.setAttribute("version", __version__) + xcoverage.setAttribute("timestamp", str(int(time.time()*1000))) + xcoverage.appendChild(self.xml_out.createComment( + " Generated by coverage.py: %s " % __url__ + )) + xpackages = self.xml_out.createElement("packages") + xcoverage.appendChild(xpackages) + + # Call xml_file for each file in the data. + self.packages = {} + self.report_files(self.xml_file, morfs, omit_prefixes=omit_prefixes) + + lnum_tot, lhits_tot = 0, 0 + bnum_tot, bhits_tot = 0, 0 + + # Populate the XML DOM with the package info. + for pkg_name, pkg_data in self.packages.items(): + class_elts, lhits, lnum, bhits, bnum = pkg_data + xpackage = self.xml_out.createElement("package") + xpackages.appendChild(xpackage) + xclasses = self.xml_out.createElement("classes") + xpackage.appendChild(xclasses) + for className in sorted(class_elts.keys()): + xclasses.appendChild(class_elts[className]) + xpackage.setAttribute("name", pkg_name.replace(os.sep, '.')) + xpackage.setAttribute("line-rate", str(rate(lhits, lnum))) + xpackage.setAttribute("branch-rate", str(rate(bhits, bnum))) + xpackage.setAttribute("complexity", "0.0") + + lnum_tot += lnum + lhits_tot += lhits + bnum_tot += bnum + bhits_tot += bhits + + xcoverage.setAttribute("line-rate", str(rate(lhits_tot, lnum_tot))) + xcoverage.setAttribute("branch-rate", str(rate(bhits_tot, bnum_tot))) + + # Use the DOM to write the output file. + outfile.write(self.xml_out.toprettyxml()) + + def xml_file(self, cu, analysis): + """Add to the XML report for a single file.""" + + # Create the 'lines' and 'package' XML elements, which + # are populated later. Note that a package == a directory. + dirname, fname = os.path.split(cu.name) + dirname = dirname or '.' 
+ package = self.packages.setdefault(dirname, [ {}, 0, 0, 0, 0 ]) + + xclass = self.xml_out.createElement("class") + + xclass.appendChild(self.xml_out.createElement("methods")) + + xlines = self.xml_out.createElement("lines") + xclass.appendChild(xlines) + className = fname.replace('.', '_') + xclass.setAttribute("name", className) + ext = os.path.splitext(cu.filename)[1] + xclass.setAttribute("filename", cu.name + ext) + xclass.setAttribute("complexity", "0.0") + + branch_lines = analysis.branch_lines() + + # For each statement, create an XML 'line' element. + for line in analysis.statements: + xline = self.xml_out.createElement("line") + xline.setAttribute("number", str(line)) + + # Q: can we get info about the number of times a statement is + # executed? If so, that should be recorded here. + xline.setAttribute("hits", str(int(not line in analysis.missing))) + + if self.arcs: + if line in branch_lines: + xline.setAttribute("branch", "true") + xlines.appendChild(xline) + + class_lines = 1.0 * len(analysis.statements) + class_hits = class_lines - len(analysis.missing) + + if self.arcs: + # We assume here that every branch line has 2 exits, which is + # usually true. In theory, though, we could have a branch line + # with more exits.. + class_branches = 2.0 * len(branch_lines) + missed_branch_targets = analysis.missing_branch_arcs().values() + missing_branches = sum([len(b) for b in missed_branch_targets]) + class_branch_hits = class_branches - missing_branches + else: + class_branches = 0.0 + class_branch_hits = 0.0 + + # Finalize the statistics that are collected in the XML DOM. + line_rate = rate(class_hits, class_lines) + branch_rate = rate(class_branch_hits, class_branches) + xclass.setAttribute("line-rate", str(line_rate)) + xclass.setAttribute("branch-rate", str(branch_rate)) + package[0][className] = xclass + package[1] += class_hits + package[2] += class_lines + package[3] += class_branch_hits + package[4] += class_branches
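Editor's note: the XML reporter plugs into the same `report_files` machinery as the summary reporter above. A sketch of producing a Cobertura file from previously collected data, assuming a `.coverage` data file already exists in the working directory::

    from coverage import coverage
    from coverage.xmlreport import XmlReporter

    cov = coverage()
    cov.load()                    # read the collected .coverage data
    reporter = XmlReporter(cov)
    out = open("coverage.xml", "w")
    reporter.report(None, outfile=out)   # morfs=None means all measured files
    out.close()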
--- a/eric5.e4p Thu Jan 07 13:42:05 2010 +0000 +++ b/eric5.e4p Thu Jan 07 13:42:51 2010 +0000 @@ -754,6 +754,10 @@ <Source>DebugClients/Python3/coverage/xmlreport.py</Source> <Source>DebugClients/Python3/coverage/phystokens.py</Source> <Source>DebugClients/Python3/coverage/results.py</Source> + <Source>DebugClients/Python/coverage/bytecode.py</Source> + <Source>DebugClients/Python/coverage/xmlreport.py</Source> + <Source>DebugClients/Python/coverage/phystokens.py</Source> + <Source>DebugClients/Python/coverage/results.py</Source> </Sources> <Forms> <Form>PyUnit/UnittestDialog.ui</Form>