diff -r 456c58fc64b0 -r d0d6e4ad31bd DebugClients/Python3/coverage/control.py --- a/DebugClients/Python3/coverage/control.py Sun Oct 04 13:35:09 2015 +0200 +++ b/DebugClients/Python3/coverage/control.py Sun Oct 04 22:37:56 2015 +0200 @@ -1,49 +1,65 @@ -"""Core control stuff for Coverage.""" +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt + +"""Core control stuff for coverage.py.""" -import atexit, os, random, socket, sys +import atexit +import inspect +import os +import platform +import sys +import traceback -from .annotate import AnnotateReporter -from .backward import string_class, iitems, sorted # pylint: disable=W0622 -from .codeunit import code_unit_factory, CodeUnit -from .collector import Collector -from .config import CoverageConfig -from .data import CoverageData -from .debug import DebugControl -from .files import FileLocator, TreeMatcher, FnmatchMatcher -from .files import PathAliases, find_python_files, prep_patterns -#from .html import HtmlReporter # Comment for eric6 -from .misc import CoverageException, bool_or_none, join_regex -from .misc import file_be_gone -from .results import Analysis, Numbers -from .summary import SummaryReporter -from .xmlreport import XmlReporter +from coverage import env, files +from coverage.annotate import AnnotateReporter +from coverage.backward import string_class, iitems +from coverage.collector import Collector +from coverage.config import CoverageConfig +from coverage.data import CoverageData, CoverageDataFiles +from coverage.debug import DebugControl +from coverage.files import TreeMatcher, FnmatchMatcher +from coverage.files import PathAliases, find_python_files, prep_patterns +from coverage.files import ModuleMatcher, abs_file +from coverage.html import HtmlReporter +from coverage.misc import CoverageException, bool_or_none, join_regex +from coverage.misc import file_be_gone +from coverage.monkey import patch_multiprocessing +from coverage.plugin import FileReporter +from coverage.plugin_support import Plugins +from coverage.python import PythonFileReporter +from coverage.results import Analysis, Numbers +from coverage.summary import SummaryReporter +from coverage.xmlreport import XmlReporter + # Pypy has some unusual stuff in the "stdlib". Consider those locations # when deciding where the stdlib is. try: - import _structseq # pylint: disable=F0401 + import _structseq except ImportError: _structseq = None -class coverage(object): +class Coverage(object): """Programmatic access to coverage.py. To use:: - from . import coverage + from coverage import Coverage - cov = coverage() + cov = Coverage() cov.start() #.. call your code .. cov.stop() cov.html_report(directory='covhtml') """ - def __init__(self, data_file=None, data_suffix=None, cover_pylib=None, - auto_data=False, timid=None, branch=None, config_file=True, - source=None, omit=None, include=None, debug=None, - debug_file=None): + def __init__( + self, data_file=None, data_suffix=None, cover_pylib=None, + auto_data=False, timid=None, branch=None, config_file=True, + source=None, omit=None, include=None, debug=None, + concurrency=None, + ): """ `data_file` is the base name of the data file to use, defaulting to ".coverage". `data_suffix` is appended (with a dot) to `data_file` to @@ -65,324 +81,253 @@ If `branch` is true, then branch coverage will be measured in addition to the usual statement coverage. - `config_file` determines what config file to read. 
If it is a string, - it is the name of the config file to read. If it is True, then a - standard file is read (".coveragerc"). If it is False, then no file is - read. + `config_file` determines what configuration file to read: + + * If it is ".coveragerc", it is interpreted as if it were True, + for backward compatibility. + + * If it is a string, it is the name of the file to read. If the + file can't be read, it is an error. + + * If it is True, then a few standard files names are tried + (".coveragerc", "setup.cfg"). It is not an error for these files + to not be found. + + * If it is False, then no configuration file is read. `source` is a list of file paths or package names. Only code located in the trees indicated by the file paths or package names will be measured. - `include` and `omit` are lists of filename patterns. Files that match + `include` and `omit` are lists of file name patterns. Files that match `include` will be measured, files that match `omit` will not. Each will also accept a single string argument. `debug` is a list of strings indicating what debugging information is - desired. `debug_file` is the file to write debug messages to, - defaulting to stderr. + desired. + + `concurrency` is a string indicating the concurrency library being used + in the measured code. Without this, coverage.py will get incorrect + results. Valid strings are "greenlet", "eventlet", "gevent", or + "thread" (the default). + + .. versionadded:: 4.0 + The `concurrency` parameter. """ - from . import __version__ - - # A record of all the warnings that have been issued. - self._warnings = [] - # Build our configuration from a number of sources: # 1: defaults: self.config = CoverageConfig() - # 2: from the coveragerc file: + # 2: from the rcfile, .coveragerc or setup.cfg file: if config_file: - if config_file is True: + did_read_rc = False + # Some API users were specifying ".coveragerc" to mean the same as + # True, so make it so. + if config_file == ".coveragerc": + config_file = True + specified_file = (config_file is not True) + if not specified_file: config_file = ".coveragerc" - try: - self.config.from_file(config_file) - except ValueError: - _, err, _ = sys.exc_info() - raise CoverageException( - "Couldn't read config file %s: %s" % (config_file, err) - ) + + did_read_rc = self.config.from_file(config_file) + + if not did_read_rc: + if specified_file: + raise CoverageException( + "Couldn't read '%s' as a config file" % config_file + ) + self.config.from_file("setup.cfg", section_prefix="coverage:") # 3: from environment variables: - self.config.from_environment('COVERAGE_OPTIONS') env_data_file = os.environ.get('COVERAGE_FILE') if env_data_file: self.config.data_file = env_data_file + debugs = os.environ.get('COVERAGE_DEBUG') + if debugs: + self.config.debug.extend(debugs.split(",")) # 4: from constructor arguments: self.config.from_args( data_file=data_file, cover_pylib=cover_pylib, timid=timid, branch=branch, parallel=bool_or_none(data_suffix), source=source, omit=omit, include=include, debug=debug, + concurrency=concurrency, ) - # Create and configure the debugging controller. - self.debug = DebugControl(self.config.debug, debug_file or sys.stderr) + self._debug_file = None + self._auto_data = auto_data + self._data_suffix = data_suffix + + # The matchers for _should_trace. + self.source_match = None + self.source_pkgs_match = None + self.pylib_match = self.cover_match = None + self.include_match = self.omit_match = None + + # Is it ok for no data to be collected? 
+ self._warn_no_data = True + self._warn_unimported_source = True + + # A record of all the warnings that have been issued. + self._warnings = [] + + # Other instance attributes, set later. + self.omit = self.include = self.source = None + self.source_pkgs = None + self.data = self.data_files = self.collector = None + self.plugins = None + self.pylib_dirs = self.cover_dirs = None + self.data_suffix = self.run_suffix = None + self._exclude_re = None + self.debug = None - self.auto_data = auto_data + # State machine variables: + # Have we initialized everything? + self._inited = False + # Have we started collecting and not stopped it? + self._started = False + # Have we measured some data and not harvested it? + self._measured = False + + def _init(self): + """Set all the initial state. + + This is called by the public methods to initialize state. This lets us + construct a :class:`Coverage` object, then tweak its state before this + function is called. - # _exclude_re is a dict mapping exclusion list names to compiled + """ + if self._inited: + return + + # Create and configure the debugging controller. COVERAGE_DEBUG_FILE + # is an environment variable, the name of a file to append debug logs + # to. + if self._debug_file is None: + debug_file_name = os.environ.get("COVERAGE_DEBUG_FILE") + if debug_file_name: + self._debug_file = open(debug_file_name, "a") + else: + self._debug_file = sys.stderr + self.debug = DebugControl(self.config.debug, self._debug_file) + + # Load plugins + self.plugins = Plugins.load_plugins(self.config.plugins, self.config, self.debug) + + # _exclude_re is a dict that maps exclusion list names to compiled # regexes. self._exclude_re = {} self._exclude_regex_stale() - self.file_locator = FileLocator() + files.set_relative_directory() # The source argument can be directories or package names. self.source = [] self.source_pkgs = [] for src in self.config.source or []: if os.path.exists(src): - self.source.append(self.file_locator.canonical_filename(src)) + self.source.append(files.canonical_filename(src)) else: self.source_pkgs.append(src) self.omit = prep_patterns(self.config.omit) self.include = prep_patterns(self.config.include) + concurrency = self.config.concurrency + if concurrency == "multiprocessing": + patch_multiprocessing() + concurrency = None + self.collector = Collector( - self._should_trace, timid=self.config.timid, - branch=self.config.branch, warn=self._warn + should_trace=self._should_trace, + check_include=self._check_include_omit_etc, + timid=self.config.timid, + branch=self.config.branch, + warn=self._warn, + concurrency=concurrency, ) + # Early warning if we aren't going to be able to support plugins. + if self.plugins.file_tracers and not self.collector.supports_plugins: + self._warn( + "Plugin file tracers (%s) aren't supported with %s" % ( + ", ".join( + plugin._coverage_plugin_name + for plugin in self.plugins.file_tracers + ), + self.collector.tracer_name(), + ) + ) + for plugin in self.plugins.file_tracers: + plugin._coverage_enabled = False + # Suffixes are a bit tricky. We want to use the data suffix only when # collecting data, not when combining data. So we save it as # `self.run_suffix` now, and promote it to `self.data_suffix` if we # find that we are collecting data later. 
- if data_suffix or self.config.parallel: - if not isinstance(data_suffix, string_class): + if self._data_suffix or self.config.parallel: + if not isinstance(self._data_suffix, string_class): # if data_suffix=True, use .machinename.pid.random - data_suffix = True + self._data_suffix = True else: - data_suffix = None + self._data_suffix = None self.data_suffix = None - self.run_suffix = data_suffix + self.run_suffix = self._data_suffix # Create the data file. We do this at construction time so that the # data file will be written into the directory where the process # started rather than wherever the process eventually chdir'd to. - self.data = CoverageData( - basename=self.config.data_file, - collector="coverage v%s" % __version__, - debug=self.debug, - ) + self.data = CoverageData(debug=self.debug) + self.data_files = CoverageDataFiles(basename=self.config.data_file) - # The dirs for files considered "installed with the interpreter". - self.pylib_dirs = [] + # The directories for files considered "installed with the interpreter". + self.pylib_dirs = set() if not self.config.cover_pylib: # Look at where some standard modules are located. That's the # indication for "installed with the interpreter". In some # environments (virtualenv, for example), these modules may be # spread across a few locations. Look at all the candidate modules # we've imported, and take all the different ones. - for m in (atexit, os, random, socket, _structseq): + for m in (atexit, inspect, os, platform, _structseq, traceback): if m is not None and hasattr(m, "__file__"): - m_dir = self._canonical_dir(m) - if m_dir not in self.pylib_dirs: - self.pylib_dirs.append(m_dir) + self.pylib_dirs.add(self._canonical_dir(m)) + if _structseq and not hasattr(_structseq, '__file__'): + # PyPy 2.4 has no __file__ in the builtin modules, but the code + # objects still have the file names. So dig into one to find + # the path to exclude. + structseq_new = _structseq.structseq_new + try: + structseq_file = structseq_new.func_code.co_filename + except AttributeError: + structseq_file = structseq_new.__code__.co_filename + self.pylib_dirs.add(self._canonical_dir(structseq_file)) - # To avoid tracing the coverage code itself, we skip anything located - # where we are. - self.cover_dir = self._canonical_dir(__file__) - - # The matchers for _should_trace. - self.source_match = None - self.pylib_match = self.cover_match = None - self.include_match = self.omit_match = None + # To avoid tracing the coverage.py code itself, we skip anything + # located where we are. + self.cover_dirs = [self._canonical_dir(__file__)] + if env.TESTING: + # When testing, we use PyContracts, which should be considered + # part of coverage.py, and it uses six. Exclude those directories + # just as we exclude ourselves. + import contracts, six + for mod in [contracts, six]: + self.cover_dirs.append(self._canonical_dir(mod)) # Set the reporting precision. Numbers.set_precision(self.config.precision) - # Is it ok for no data to be collected? - self._warn_no_data = True - self._warn_unimported_source = True - - # State machine variables: - # Have we started collecting and not stopped it? - self._started = False - # Have we measured some data and not harvested it? 
- self._measured = False - atexit.register(self._atexit) - def _canonical_dir(self, morf): - """Return the canonical directory of the module or file `morf`.""" - return os.path.split(CodeUnit(morf, self.file_locator).filename)[0] - - def _source_for_file(self, filename): - """Return the source file for `filename`.""" - if not filename.endswith(".py"): - if filename.endswith((".pyc", ".pyo")): - filename = filename[:-1] - elif filename.endswith("$py.class"): # jython - filename = filename[:-9] + ".py" - return filename - - def _should_trace_with_reason(self, filename, frame): - """Decide whether to trace execution in `filename`, with a reason. - - This function is called from the trace function. As each new file name - is encountered, this function determines whether it is traced or not. - - Returns a pair of values: the first indicates whether the file should - be traced: it's a canonicalized filename if it should be traced, None - if it should not. The second value is a string, the resason for the - decision. - - """ - if not filename: - # Empty string is pretty useless - return None, "empty string isn't a filename" - - if filename.startswith('<'): - # Lots of non-file execution is represented with artificial - # filenames like "<string>", "<doctest readme.txt[0]>", or - # "<exec_function>". Don't ever trace these executions, since we - # can't do anything with the data later anyway. - return None, "not a real filename" - - self._check_for_packages() - - # Compiled Python files have two filenames: frame.f_code.co_filename is - # the filename at the time the .pyc was compiled. The second name is - # __file__, which is where the .pyc was actually loaded from. Since - # .pyc files can be moved after compilation (for example, by being - # installed), we look for __file__ in the frame and prefer it to the - # co_filename value. - dunder_file = frame.f_globals.get('__file__') - if dunder_file: - filename = self._source_for_file(dunder_file) - - # Jython reports the .class file to the tracer, use the source file. - if filename.endswith("$py.class"): - filename = filename[:-9] + ".py" - - canonical = self.file_locator.canonical_filename(filename) - - # If the user specified source or include, then that's authoritative - # about the outer bound of what to measure and we don't have to apply - # any canned exclusions. If they didn't, then we have to exclude the - # stdlib and coverage.py directories. - if self.source_match: - if not self.source_match.match(canonical): - return None, "falls outside the --source trees" - elif self.include_match: - if not self.include_match.match(canonical): - return None, "falls outside the --include trees" - else: - # If we aren't supposed to trace installed code, then check if this - # is near the Python standard library and skip it if so. - if self.pylib_match and self.pylib_match.match(canonical): - return None, "is in the stdlib" - - # We exclude the coverage code itself, since a little of it will be - # measured otherwise. - if self.cover_match and self.cover_match.match(canonical): - return None, "is part of coverage.py" - - # Check the file against the omit pattern. - if self.omit_match and self.omit_match.match(canonical): - return None, "is inside an --omit pattern" - - return canonical, "because we love you" - - def _should_trace(self, filename, frame): - """Decide whether to trace execution in `filename`. - - Calls `_should_trace_with_reason`, and returns just the decision. 
- - """ - canonical, reason = self._should_trace_with_reason(filename, frame) - if self.debug.should('trace'): - if not canonical: - msg = "Not tracing %r: %s" % (filename, reason) - else: - msg = "Tracing %r" % (filename,) - self.debug.write(msg) - return canonical - - def _warn(self, msg): - """Use `msg` as a warning.""" - self._warnings.append(msg) - sys.stderr.write("Coverage.py warning: %s\n" % msg) - - def _check_for_packages(self): - """Update the source_match matcher with latest imported packages.""" - # Our self.source_pkgs attribute is a list of package names we want to - # measure. Each time through here, we see if we've imported any of - # them yet. If so, we add its file to source_match, and we don't have - # to look for that package any more. - if self.source_pkgs: - found = [] - for pkg in self.source_pkgs: - try: - mod = sys.modules[pkg] - except KeyError: - continue - - found.append(pkg) - - try: - pkg_file = mod.__file__ - except AttributeError: - pkg_file = None - else: - d, f = os.path.split(pkg_file) - if f.startswith('__init__'): - # This is actually a package, return the directory. - pkg_file = d - else: - pkg_file = self._source_for_file(pkg_file) - pkg_file = self.file_locator.canonical_filename(pkg_file) - if not os.path.exists(pkg_file): - pkg_file = None - - if pkg_file: - self.source.append(pkg_file) - self.source_match.add(pkg_file) - else: - self._warn("Module %s has no Python source." % pkg) - - for pkg in found: - self.source_pkgs.remove(pkg) - - def use_cache(self, usecache): - """Control the use of a data file (incorrectly called a cache). - - `usecache` is true or false, whether to read and write data on disk. - - """ - self.data.usefile(usecache) - - def load(self): - """Load previously-collected coverage data from the data file.""" - self.collector.reset() - self.data.read() - - def start(self): - """Start measuring code coverage. - - Coverage measurement actually occurs in functions called after `start` - is invoked. Statements in the same scope as `start` won't be measured. - - Once you invoke `start`, you must also call `stop` eventually, or your - process might not shut down cleanly. - - """ - if self.run_suffix: - # Calling start() means we're running code, so use the run_suffix - # as the data_suffix when we eventually save the data. - self.data_suffix = self.run_suffix - if self.auto_data: - self.load() + self._inited = True # Create the matchers we need for _should_trace if self.source or self.source_pkgs: self.source_match = TreeMatcher(self.source) + self.source_pkgs_match = ModuleMatcher(self.source_pkgs) else: - if self.cover_dir: - self.cover_match = TreeMatcher([self.cover_dir]) + if self.cover_dirs: + self.cover_match = TreeMatcher(self.cover_dirs) if self.pylib_dirs: self.pylib_match = TreeMatcher(self.pylib_dirs) if self.include: @@ -391,14 +336,344 @@ self.omit_match = FnmatchMatcher(self.omit) # The user may want to debug things, show info if desired. 
+ wrote_any = False if self.debug.should('config'): - self.debug.write("Configuration values:") config_info = sorted(self.config.__dict__.items()) - self.debug.write_formatted_info(config_info) + self.debug.write_formatted_info("config", config_info) + wrote_any = True if self.debug.should('sys'): - self.debug.write("Debugging info:") - self.debug.write_formatted_info(self.sysinfo()) + self.debug.write_formatted_info("sys", self.sys_info()) + for plugin in self.plugins: + header = "sys: " + plugin._coverage_plugin_name + info = plugin.sys_info() + self.debug.write_formatted_info(header, info) + wrote_any = True + + if wrote_any: + self.debug.write_formatted_info("end", ()) + + def _canonical_dir(self, morf): + """Return the canonical directory of the module or file `morf`.""" + morf_filename = PythonFileReporter(morf, self).filename + return os.path.split(morf_filename)[0] + + def _source_for_file(self, filename): + """Return the source file for `filename`. + + Given a file name being traced, return the best guess as to the source + file to attribute it to. + + """ + if filename.endswith(".py"): + # .py files are themselves source files. + return filename + + elif filename.endswith((".pyc", ".pyo")): + # Bytecode files probably have source files near them. + py_filename = filename[:-1] + if os.path.exists(py_filename): + # Found a .py file, use that. + return py_filename + if env.WINDOWS: + # On Windows, it could be a .pyw file. + pyw_filename = py_filename + "w" + if os.path.exists(pyw_filename): + return pyw_filename + # Didn't find source, but it's probably the .py file we want. + return py_filename + + elif filename.endswith("$py.class"): + # Jython is easy to guess. + return filename[:-9] + ".py" + + # No idea, just use the file name as-is. + return filename + + def _name_for_module(self, module_globals, filename): + """Get the name of the module for a set of globals and file name. + + For configurability's sake, we allow __main__ modules to be matched by + their importable name. + + If loaded via runpy (aka -m), we can usually recover the "original" + full dotted module name, otherwise, we resort to interpreting the + file name to get the module's name. In the case that the module name + can't be determined, None is returned. + + """ + dunder_name = module_globals.get('__name__', None) + + if isinstance(dunder_name, str) and dunder_name != '__main__': + # This is the usual case: an imported module. + return dunder_name + + loader = module_globals.get('__loader__', None) + for attrname in ('fullname', 'name'): # attribute renamed in py3.2 + if hasattr(loader, attrname): + fullname = getattr(loader, attrname) + else: + continue + + if isinstance(fullname, str) and fullname != '__main__': + # Module loaded via: runpy -m + return fullname + + # Script as first argument to Python command line. + inspectedname = inspect.getmodulename(filename) + if inspectedname is not None: + return inspectedname + else: + return dunder_name + + def _should_trace_internal(self, filename, frame): + """Decide whether to trace execution in `filename`, with a reason. + + This function is called from the trace function. As each new file name + is encountered, this function determines whether it is traced or not. + + Returns a FileDisposition object. 
+ + """ + original_filename = filename + disp = _disposition_init(self.collector.file_disposition_class, filename) + + def nope(disp, reason): + """Simple helper to make it easy to return NO.""" + disp.trace = False + disp.reason = reason + return disp + + # Compiled Python files have two file names: frame.f_code.co_filename is + # the file name at the time the .pyc was compiled. The second name is + # __file__, which is where the .pyc was actually loaded from. Since + # .pyc files can be moved after compilation (for example, by being + # installed), we look for __file__ in the frame and prefer it to the + # co_filename value. + dunder_file = frame.f_globals.get('__file__') + if dunder_file: + filename = self._source_for_file(dunder_file) + if original_filename and not original_filename.startswith('<'): + orig = os.path.basename(original_filename) + if orig != os.path.basename(filename): + # Files shouldn't be renamed when moved. This happens when + # exec'ing code. If it seems like something is wrong with + # the frame's file name, then just use the original. + filename = original_filename + + if not filename: + # Empty string is pretty useless. + return nope(disp, "empty string isn't a file name") + + if filename.startswith('memory:'): + return nope(disp, "memory isn't traceable") + + if filename.startswith('<'): + # Lots of non-file execution is represented with artificial + # file names like "<string>", "<doctest readme.txt[0]>", or + # "<exec_function>". Don't ever trace these executions, since we + # can't do anything with the data later anyway. + return nope(disp, "not a real file name") + + # Jython reports the .class file to the tracer, use the source file. + if filename.endswith("$py.class"): + filename = filename[:-9] + ".py" + + canonical = files.canonical_filename(filename) + disp.canonical_filename = canonical + + # Try the plugins, see if they have an opinion about the file. + plugin = None + for plugin in self.plugins.file_tracers: + if not plugin._coverage_enabled: + continue + + try: + file_tracer = plugin.file_tracer(canonical) + if file_tracer is not None: + file_tracer._coverage_plugin = plugin + disp.trace = True + disp.file_tracer = file_tracer + if file_tracer.has_dynamic_source_filename(): + disp.has_dynamic_filename = True + else: + disp.source_filename = files.canonical_filename( + file_tracer.source_filename() + ) + break + except Exception: + self._warn( + "Disabling plugin %r due to an exception:" % ( + plugin._coverage_plugin_name + ) + ) + traceback.print_exc() + plugin._coverage_enabled = False + continue + else: + # No plugin wanted it: it's Python. + disp.trace = True + disp.source_filename = canonical + + if not disp.has_dynamic_filename: + if not disp.source_filename: + raise CoverageException( + "Plugin %r didn't set source_filename for %r" % + (plugin, disp.original_filename) + ) + reason = self._check_include_omit_etc_internal( + disp.source_filename, frame, + ) + if reason: + nope(disp, reason) + + return disp + + def _check_include_omit_etc_internal(self, filename, frame): + """Check a file name against the include, omit, etc, rules. + + Returns a string or None. String means, don't trace, and is the reason + why. None means no reason found to not trace. + + """ + modulename = self._name_for_module(frame.f_globals, filename) + + # If the user specified source or include, then that's authoritative + # about the outer bound of what to measure and we don't have to apply + # any canned exclusions. 
If they didn't, then we have to exclude the + # stdlib and coverage.py directories. + if self.source_match: + if self.source_pkgs_match.match(modulename): + if modulename in self.source_pkgs: + self.source_pkgs.remove(modulename) + return None # There's no reason to skip this file. + + if not self.source_match.match(filename): + return "falls outside the --source trees" + elif self.include_match: + if not self.include_match.match(filename): + return "falls outside the --include trees" + else: + # If we aren't supposed to trace installed code, then check if this + # is near the Python standard library and skip it if so. + if self.pylib_match and self.pylib_match.match(filename): + return "is in the stdlib" + + # We exclude the coverage.py code itself, since a little of it + # will be measured otherwise. + if self.cover_match and self.cover_match.match(filename): + return "is part of coverage.py" + + # Check the file against the omit pattern. + if self.omit_match and self.omit_match.match(filename): + return "is inside an --omit pattern" + + # No reason found to skip this file. + return None + + def _should_trace(self, filename, frame): + """Decide whether to trace execution in `filename`. + + Calls `_should_trace_internal`, and returns the FileDisposition. + + """ + disp = self._should_trace_internal(filename, frame) + if self.debug.should('trace'): + self.debug.write(_disposition_debug_msg(disp)) + return disp + + def _check_include_omit_etc(self, filename, frame): + """Check a file name against the include/omit/etc, rules, verbosely. + + Returns a boolean: True if the file should be traced, False if not. + + """ + reason = self._check_include_omit_etc_internal(filename, frame) + if self.debug.should('trace'): + if not reason: + msg = "Including %r" % (filename,) + else: + msg = "Not including %r: %s" % (filename, reason) + self.debug.write(msg) + + return not reason + + def _warn(self, msg): + """Use `msg` as a warning.""" + self._warnings.append(msg) + if self.debug.should('pid'): + msg = "[%d] %s" % (os.getpid(), msg) + sys.stderr.write("Coverage.py warning: %s\n" % msg) + + def get_option(self, option_name): + """Get an option from the configuration. + + `option_name` is a colon-separated string indicating the section and + option name. For example, the ``branch`` option in the ``[run]`` + section of the config file would be indicated with `"run:branch"`. + + Returns the value of the option. + + .. versionadded:: 4.0 + + """ + return self.config.get_option(option_name) + + def set_option(self, option_name, value): + """Set an option in the configuration. + + `option_name` is a colon-separated string indicating the section and + option name. For example, the ``branch`` option in the ``[run]`` + section of the config file would be indicated with ``"run:branch"``. + + `value` is the new value for the option. This should be a Python + value where appropriate. For example, use True for booleans, not the + string ``"True"``. + + As an example, calling:: + + cov.set_option("run:branch", True) + + has the same effect as this configuration file:: + + [run] + branch = True + + .. 
versionadded:: 4.0 + + """ + self.config.set_option(option_name, value) + + def use_cache(self, usecache): + """Obsolete method.""" + self._init() + if not usecache: + self._warn("use_cache(False) is no longer supported.") + + def load(self): + """Load previously-collected coverage data from the data file.""" + self._init() + self.collector.reset() + self.data_files.read(self.data) + + def start(self): + """Start measuring code coverage. + + Coverage measurement actually occurs in functions called after + :meth:`start` is invoked. Statements in the same scope as + :meth:`start` won't be measured. + + Once you invoke :meth:`start`, you must also call :meth:`stop` + eventually, or your process might not shut down cleanly. + + """ + self._init() + if self.run_suffix: + # Calling start() means we're running code, so use the run_suffix + # as the data_suffix when we eventually save the data. + self.data_suffix = self.run_suffix + if self._auto_data: + self.load() self.collector.start() self._started = True @@ -406,14 +681,15 @@ def stop(self): """Stop measuring code coverage.""" + if self._started: + self.collector.stop() self._started = False - self.collector.stop() def _atexit(self): """Clean up on process shutdown.""" if self._started: self.stop() - if self.auto_data: + if self._auto_data: self.save() def erase(self): @@ -423,11 +699,14 @@ discarding the data file. """ + self._init() self.collector.reset() self.data.erase() + self.data_files.erase(parallel=self.config.parallel) def clear_exclude(self, which='exclude'): """Clear the exclude list.""" + self._init() setattr(self.config, which + "_list", []) self._exclude_regex_stale() @@ -446,6 +725,7 @@ is marked for special treatment during reporting. """ + self._init() excl_list = getattr(self.config, which + "_list") excl_list.append(regex) self._exclude_regex_stale() @@ -464,79 +744,86 @@ def get_exclude_list(self, which='exclude'): """Return a list of excluded regex patterns. - `which` indicates which list is desired. See `exclude` for the lists - that are available, and their meaning. + `which` indicates which list is desired. See :meth:`exclude` for the + lists that are available, and their meaning. """ + self._init() return getattr(self.config, which + "_list") def save(self): """Save the collected coverage data to the data file.""" - data_suffix = self.data_suffix - if data_suffix is True: - # If data_suffix was a simple true value, then make a suffix with - # plenty of distinguishing information. We do this here in - # `save()` at the last minute so that the pid will be correct even - # if the process forks. - extra = "" - if _TEST_NAME_FILE: - f = open(_TEST_NAME_FILE) - test_name = f.read() - f.close() - extra = "." + test_name - data_suffix = "%s%s.%s.%06d" % ( - socket.gethostname(), extra, os.getpid(), - random.randint(0, 999999) - ) + self._init() + self.get_data() + self.data_files.write(self.data, suffix=self.data_suffix) - self._harvest_data() - self.data.write(suffix=data_suffix) - - def combine(self): + def combine(self, data_paths=None): """Combine together a number of similarly-named coverage data files. All coverage data files whose name starts with `data_file` (from the coverage() constructor) will be read, and combined together into the current measurements. + `data_paths` is a list of files or directories from which data should + be combined. If no list is passed, then the data files from the + directory indicated by the current data file (probably the current + directory) will be combined. + + .. 
versionadded:: 4.0 + The `data_paths` parameter. + """ + self._init() + self.get_data() + aliases = None if self.config.paths: - aliases = PathAliases(self.file_locator) + aliases = PathAliases() for paths in self.config.paths.values(): result = paths[0] for pattern in paths[1:]: aliases.add(pattern, result) - self.data.combine_parallel_data(aliases=aliases) - def _harvest_data(self): + self.data_files.combine_parallel_data(self.data, aliases=aliases, data_paths=data_paths) + + def get_data(self): """Get the collected data and reset the collector. Also warn about various problems collecting data. + Returns a :class:`coverage.CoverageData`, the collected coverage data. + + .. versionadded:: 4.0 + """ + self._init() if not self._measured: return - self.data.add_line_data(self.collector.get_line_data()) - self.data.add_arc_data(self.collector.get_arc_data()) - self.collector.reset() + self.collector.save_data(self.data) # If there are still entries in the source_pkgs list, then we never # encountered those packages. if self._warn_unimported_source: for pkg in self.source_pkgs: - self._warn("Module %s was never imported." % pkg) + if pkg not in sys.modules: + self._warn("Module %s was never imported." % pkg) + elif not ( + hasattr(sys.modules[pkg], '__file__') and + os.path.exists(sys.modules[pkg].__file__) + ): + self._warn("Module %s has no Python source." % pkg) + else: + self._warn("Module %s was previously imported, but not measured." % pkg) # Find out if we got any data. - summary = self.data.summary() - if not summary and self._warn_no_data: + if not self.data and self._warn_no_data: self._warn("No data was collected.") # Find files that were never executed at all. for src in self.source: for py_file in find_python_files(src): - py_file = self.file_locator.canonical_filename(py_file) + py_file = files.canonical_filename(py_file) if self.omit_match and self.omit_match.match(py_file): # Turns out this file was omitted, so don't pull it back @@ -545,7 +832,20 @@ self.data.touch_file(py_file) + # Add run information. + self.data.add_run_info( + brief_sys=" ".join([ + platform.python_implementation(), + platform.python_version(), + platform.system(), + ]) + ) + + if self.config.note: + self.data.add_run_info(note=self.config.note) + self._measured = False + return self.data # Backward compatibility with version 1. def analysis(self, morf): @@ -556,10 +856,10 @@ def analysis2(self, morf): """Analyze a module. - `morf` is a module or a filename. It will be analyzed to determine + `morf` is a module or a file name. It will be analyzed to determine its coverage statistics. The return value is a 5-tuple: - * The filename for the module. + * The file name for the module. * A list of line numbers of executable statements. * A list of line numbers of excluded statements. * A list of line numbers of statements not run (missing from @@ -570,6 +870,7 @@ coverage data. """ + self._init() analysis = self._analyze(morf) return ( analysis.filename, @@ -585,38 +886,91 @@ Returns an `Analysis` object. 
""" - self._harvest_data() - if not isinstance(it, CodeUnit): - it = code_unit_factory(it, self.file_locator)[0] + self.get_data() + if not isinstance(it, FileReporter): + it = self._get_file_reporter(it) + + return Analysis(self.data, it) + + def _get_file_reporter(self, morf): + """Get a FileReporter for a module or file name.""" + plugin = None + file_reporter = "python" + + if isinstance(morf, string_class): + abs_morf = abs_file(morf) + plugin_name = self.data.file_tracer(abs_morf) + if plugin_name: + plugin = self.plugins.get(plugin_name) + + if plugin: + file_reporter = plugin.file_reporter(abs_morf) + if file_reporter is None: + raise CoverageException( + "Plugin %r did not provide a file reporter for %r." % ( + plugin._coverage_plugin_name, morf + ) + ) + + if file_reporter == "python": + file_reporter = PythonFileReporter(morf, self) + + return file_reporter - return Analysis(self, it) + def _get_file_reporters(self, morfs=None): + """Get a list of FileReporters for a list of modules or file names. + + For each module or file name in `morfs`, find a FileReporter. Return + the list of FileReporters. + + If `morfs` is a single module or file name, this returns a list of one + FileReporter. If `morfs` is empty or None, then the list of all files + measured is used to find the FileReporters. + + """ + if not morfs: + morfs = self.data.measured_files() - def report(self, morfs=None, show_missing=True, ignore_errors=None, - file=None, # pylint: disable=W0622 - omit=None, include=None - ): + # Be sure we have a list. + if not isinstance(morfs, (list, tuple)): + morfs = [morfs] + + file_reporters = [] + for morf in morfs: + file_reporter = self._get_file_reporter(morf) + file_reporters.append(file_reporter) + + return file_reporters + + def report( + self, morfs=None, show_missing=True, ignore_errors=None, + file=None, # pylint: disable=redefined-builtin + omit=None, include=None, skip_covered=False, + ): """Write a summary report to `file`. Each module in `morfs` is listed, with counts of statements, executed statements, missing statements, and a list of lines missed. - `include` is a list of filename patterns. Modules whose filenames - match those patterns will be included in the report. Modules matching - `omit` will not be included in the report. + `include` is a list of file name patterns. Files that match will be + included in the report. Files matching `omit` will not be included in + the report. Returns a float, the total percentage covered. """ - self._harvest_data() + self.get_data() self.config.from_args( ignore_errors=ignore_errors, omit=omit, include=include, - show_missing=show_missing, + show_missing=show_missing, skip_covered=skip_covered, ) reporter = SummaryReporter(self, self.config) return reporter.report(morfs, outfile=file) - def annotate(self, morfs=None, directory=None, ignore_errors=None, - omit=None, include=None): + def annotate( + self, morfs=None, directory=None, ignore_errors=None, + omit=None, include=None, + ): """Annotate a list of modules. Each module in `morfs` is annotated. The source is written to a new @@ -624,10 +978,10 @@ marker to indicate the coverage of the line. Covered lines have ">", excluded lines have "-", and missing lines have "!". - See `coverage.report()` for other arguments. + See :meth:`report` for other arguments. 
""" - self._harvest_data() + self.get_data() self.config.from_args( ignore_errors=ignore_errors, omit=omit, include=include ) @@ -648,12 +1002,12 @@ `title` is a text string (not HTML) to use as the title of the HTML report. - See `coverage.report()` for other arguments. + See :meth:`report` for other arguments. Returns a float, the total percentage covered. """ - self._harvest_data() + self.get_data() self.config.from_args( ignore_errors=ignore_errors, omit=omit, include=include, html_dir=directory, extra_css=extra_css, html_title=title, @@ -661,8 +1015,10 @@ reporter = HtmlReporter(self, self.config) return reporter.report(morfs) - def xml_report(self, morfs=None, outfile=None, ignore_errors=None, - omit=None, include=None): + def xml_report( + self, morfs=None, outfile=None, ignore_errors=None, + omit=None, include=None, + ): """Generate an XML report of coverage results. The report is compatible with Cobertura reports. @@ -670,12 +1026,12 @@ Each module in `morfs` is included in the report. `outfile` is the path to write the file to, "-" will write to stdout. - See `coverage.report()` for other arguments. + See :meth:`report` for other arguments. Returns a float, the total percentage covered. """ - self._harvest_data() + self.get_data() self.config.from_args( ignore_errors=ignore_errors, omit=omit, include=include, xml_output=outfile, @@ -686,69 +1042,112 @@ if self.config.xml_output == '-': outfile = sys.stdout else: + # Ensure that the output directory is created; done here + # because this report pre-opens the output file. + # HTMLReport does this using the Report plumbing because + # its task is more complex, being multiple files. + output_dir = os.path.dirname(self.config.xml_output) + if output_dir and not os.path.isdir(output_dir): + os.makedirs(output_dir) outfile = open(self.config.xml_output, "w") file_to_close = outfile try: - try: - reporter = XmlReporter(self, self.config) - return reporter.report(morfs, outfile=outfile) - except CoverageException: - delete_file = True - raise + reporter = XmlReporter(self, self.config) + return reporter.report(morfs, outfile=outfile) + except CoverageException: + delete_file = True + raise finally: if file_to_close: file_to_close.close() if delete_file: file_be_gone(self.config.xml_output) - def sysinfo(self): + def sys_info(self): """Return a list of (key, value) pairs showing internal information.""" import coverage as covmod - import platform, re + + self._init() - try: - implementation = platform.python_implementation() - except AttributeError: - implementation = "unknown" + ft_plugins = [] + for ft in self.plugins.file_tracers: + ft_name = ft._coverage_plugin_name + if not ft._coverage_enabled: + ft_name += " (disabled)" + ft_plugins.append(ft_name) info = [ ('version', covmod.__version__), ('coverage', covmod.__file__), - ('cover_dir', self.cover_dir), + ('cover_dirs', self.cover_dirs), ('pylib_dirs', self.pylib_dirs), ('tracer', self.collector.tracer_name()), + ('plugins.file_tracers', ft_plugins), ('config_files', self.config.attempted_config_files), ('configs_read', self.config.config_files), - ('data_path', self.data.filename), + ('data_path', self.data_files.filename), ('python', sys.version.replace('\n', '')), ('platform', platform.platform()), - ('implementation', implementation), + ('implementation', platform.python_implementation()), ('executable', sys.executable), ('cwd', os.getcwd()), ('path', sys.path), - ('environment', sorted([ - ("%s = %s" % (k, v)) for k, v in iitems(os.environ) - if re.search(r"^COV|^PY", k) - ])), 
+ ('environment', sorted( + ("%s = %s" % (k, v)) + for k, v in iitems(os.environ) + if k.startswith(("COV", "PY")) + )), ('command_line', " ".join(getattr(sys, 'argv', ['???']))), ] - if self.source_match: - info.append(('source_match', self.source_match.info())) - if self.include_match: - info.append(('include_match', self.include_match.info())) - if self.omit_match: - info.append(('omit_match', self.omit_match.info())) - if self.cover_match: - info.append(('cover_match', self.cover_match.info())) - if self.pylib_match: - info.append(('pylib_match', self.pylib_match.info())) + + matcher_names = [ + 'source_match', 'source_pkgs_match', + 'include_match', 'omit_match', + 'cover_match', 'pylib_match', + ] + + for matcher_name in matcher_names: + matcher = getattr(self, matcher_name) + if matcher: + matcher_info = matcher.info() + else: + matcher_info = '-none-' + info.append((matcher_name, matcher_info)) return info +# FileDisposition "methods": FileDisposition is a pure value object, so it can +# be implemented in either C or Python. Acting on them is done with these +# functions. + +def _disposition_init(cls, original_filename): + """Construct and initialize a new FileDisposition object.""" + disp = cls() + disp.original_filename = original_filename + disp.canonical_filename = original_filename + disp.source_filename = None + disp.trace = False + disp.reason = "" + disp.file_tracer = None + disp.has_dynamic_filename = False + return disp + + +def _disposition_debug_msg(disp): + """Make a nice debug message of what the FileDisposition is doing.""" + if disp.trace: + msg = "Tracing %r" % (disp.original_filename,) + if disp.file_tracer: + msg += ": will be traced by %r" % disp.file_tracer + else: + msg = "Not tracing %r: %s" % (disp.original_filename, disp.reason) + return msg + + def process_startup(): - """Call this at Python startup to perhaps measure coverage. + """Call this at Python start-up to perhaps measure coverage. If the environment variable COVERAGE_PROCESS_START is defined, coverage measurement is started. The value of the variable is the config file @@ -768,12 +1167,27 @@ """ cps = os.environ.get("COVERAGE_PROCESS_START") - if cps: - cov = coverage(config_file=cps, auto_data=True) - cov.start() - cov._warn_no_data = False - cov._warn_unimported_source = False + if not cps: + # No request for coverage, nothing to do. + return + # This function can be called more than once in a process. This happens + # because some virtualenv configurations make the same directory visible + # twice in sys.path. This means that the .pth file will be found twice, + # and executed twice, executing this function twice. We set a global + # flag (an attribute on this function) to indicate that coverage.py has + # already been started, so we can avoid doing it twice. + # + # https://bitbucket.org/ned/coveragepy/issue/340/keyerror-subpy has more + # details. -# A hack for debugging testing in subprocesses. -_TEST_NAME_FILE = "" #"/tmp/covtest.txt" + if hasattr(process_startup, "done"): + # We've annotated this function before, so we must have already + # started coverage.py in this process. Nothing to do. + return + + process_startup.done = True + cov = Coverage(config_file=cps, auto_data=True) + cov.start() + cov._warn_no_data = False + cov._warn_unimported_source = False
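
Note: below is a minimal usage sketch of the programmatic API as it stands after this change, based on the class docstring and the set_option()/get_data() docstrings in the diff above. It is illustrative only, not part of the patch; the `from coverage import Coverage` import path and the 'covhtml' output directory follow the docstring example and should be treated as placeholders for your own setup.

    from coverage import Coverage

    # Construct the renamed Coverage object (the class was previously named
    # "coverage"). With config_file=True, ".coveragerc" and then "setup.cfg"
    # are tried; it is not an error if neither exists.
    cov = Coverage(config_file=True)

    # Options can also be adjusted after construction, before measurement
    # starts (same effect as "branch = True" in the [run] section).
    cov.set_option("run:branch", True)

    cov.start()
    # ... run the code to be measured ...
    cov.stop()

    # get_data() (new in 4.0) replaces the old _harvest_data() step and
    # returns the collected CoverageData object.
    data = cov.get_data()

    cov.save()

    # Reporting works as before; note that sysinfo() is now sys_info().
    cov.report(show_missing=True)
    cov.html_report(directory='covhtml')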