--- a/eric6/DebugClients/Python/coverage/collector.py	Wed Feb 19 19:38:36 2020 +0100
+++ b/eric6/DebugClients/Python/coverage/collector.py	Sat Feb 22 14:27:42 2020 +0100
@@ -1,5 +1,5 @@
 # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
 
 """Raw data collector for coverage.py."""
 
@@ -9,7 +9,7 @@
 from coverage import env
 from coverage.backward import litems, range     # pylint: disable=redefined-builtin
 from coverage.debug import short_stack
-from coverage.files import abs_file
+from coverage.disposition import FileDisposition
 from coverage.misc import CoverageException, isolate_module
 from coverage.pytracer import PyTracer
 
@@ -33,19 +33,6 @@
     CTracer = None
 
 
-class FileDisposition(object):
-    """A simple value type for recording what to do with a file."""
-    pass
-
-
-def should_start_context(frame):
-    """Who-Tests-What hack: Determine whether this frame begins a new who-context."""
-    fn_name = frame.f_code.co_name
-    if fn_name.startswith("test"):
-        return fn_name
-    return None
-
-
 class Collector(object):
     """Collects trace data.
 
@@ -70,7 +57,10 @@
     # The concurrency settings we support here.
     SUPPORTED_CONCURRENCIES = set(["greenlet", "eventlet", "gevent", "thread"])
 
-    def __init__(self, should_trace, check_include, timid, branch, warn, concurrency):
+    def __init__(
+        self, should_trace, check_include, should_start_context, file_mapper,
+        timid, branch, warn, concurrency,
+    ):
         """Create a collector.
 
         `should_trace` is a function, taking a file name and a frame, and
@@ -79,6 +69,15 @@
         `check_include` is a function taking a file name and a frame. It returns
         a boolean: True if the file should be traced, False if not.
 
+        `should_start_context` is a function taking a frame, and returning a
+        string. If the frame should be the start of a new context, the string
+        is the new context. If the frame should not be the start of a new
+        context, return None.
+
+        `file_mapper` is a function taking a filename, and returning a Unicode
+        filename. The result is the name that will be recorded in the data
+        file.
+
         If `timid` is true, then a slower simpler trace function will be used.
         This is important for some environments where manipulation of tracing
         functions make the faster more sophisticated trace function not
@@ -100,13 +99,19 @@
         """
         self.should_trace = should_trace
         self.check_include = check_include
+        self.should_start_context = should_start_context
+        self.file_mapper = file_mapper
         self.warn = warn
         self.branch = branch
         self.threading = None
+        self.covdata = None
+
+        self.static_context = None
 
         self.origin = short_stack()
 
         self.concur_id_func = None
+        self.mapped_file_cache = {}
 
         # We can handle a few concurrency options here, but only one at a time.
         these_concurrencies = self.SUPPORTED_CONCURRENCIES.intersection(concurrency)
@@ -139,10 +144,6 @@
                 )
             )
 
-        # Who-Tests-What is just a hack at the moment, so turn it on with an
-        # environment variable.
-        self.wtw = int(os.getenv('COVERAGE_WTW', 0))
-
        self.reset()
 
         if timid:
@@ -163,13 +164,23 @@
     def __repr__(self):
         return "<Collector at 0x%x: %s>" % (id(self), self.tracer_name())
 
+    def use_data(self, covdata, context):
+        """Use `covdata` for recording data."""
+        self.covdata = covdata
+        self.static_context = context
+        self.covdata.set_context(self.static_context)
+
     def tracer_name(self):
         """Return the class name of the tracer we're using."""
         return self._trace_class.__name__
 
     def _clear_data(self):
         """Clear out existing data, but stay ready for more collection."""
-        self.data.clear()
+        # We used to used self.data.clear(), but that would remove filename
+        # keys and data values that were still in use higher up the stack
+        # when we are called as part of switch_context.
+        for d in self.data.values():
+            d.clear()
 
         for tracer in self.tracers:
             tracer.reset_activity()
@@ -181,10 +192,6 @@
         # pairs as keys (if branch coverage).
         self.data = {}
 
-        # A dict mapping contexts to data dictionaries.
-        self.contexts = {}
-        self.contexts[None] = self.data
-
         # A dictionary mapping file names to file tracer plugin names that will
         # handle them.
         self.file_tracers = {}
@@ -246,11 +253,9 @@
             tracer.threading = self.threading
         if hasattr(tracer, 'check_include'):
             tracer.check_include = self.check_include
-        if self.wtw:
-            if hasattr(tracer, 'should_start_context'):
-                tracer.should_start_context = should_start_context
-            if hasattr(tracer, 'switch_context'):
-                tracer.switch_context = self.switch_context
+        if hasattr(tracer, 'should_start_context'):
+            tracer.should_start_context = self.should_start_context
+            tracer.switch_context = self.switch_context
 
         fn = tracer.start()
         self.tracers.append(tracer)
@@ -366,52 +371,59 @@
         return any(tracer.activity() for tracer in self.tracers)
 
     def switch_context(self, new_context):
-        """Who-Tests-What hack: switch to a new who-context."""
-        # Make a new data dict, or find the existing one, and switch all the
-        # tracers to use it.
-        data = self.contexts.setdefault(new_context, {})
-        for tracer in self.tracers:
-            tracer.data = data
+        """Switch to a new dynamic context."""
+        self.flush_data()
+        if self.static_context:
+            context = self.static_context
+            if new_context:
+                context += "|" + new_context
+        else:
+            context = new_context
+        self.covdata.set_context(context)
+
+    def cached_mapped_file(self, filename):
+        """A locally cached version of file names mapped through file_mapper."""
+        key = (type(filename), filename)
+        try:
+            return self.mapped_file_cache[key]
+        except KeyError:
+            return self.mapped_file_cache.setdefault(key, self.file_mapper(filename))
 
-    def save_data(self, covdata):
-        """Save the collected data to a `CoverageData`.
+    def mapped_file_dict(self, d):
+        """Return a dict like d, but with keys modified by file_mapper."""
+        # The call to litems() ensures that the GIL protects the dictionary
+        # iterator against concurrent modifications by tracers running
+        # in other threads. We try three times in case of concurrent
+        # access, hoping to get a clean copy.
+        runtime_err = None
+        for _ in range(3):
+            try:
+                items = litems(d)
+            except RuntimeError as ex:
+                runtime_err = ex
+            else:
+                break
+        else:
+            raise runtime_err
+
+        return dict((self.cached_mapped_file(k), v) for k, v in items if v)
+
+    def flush_data(self):
+        """Save the collected data to our associated `CoverageData`.
+
+        Data may have also been saved along the way. This forces the
+        last of the data to be saved.
 
         Returns True if there was data to save, False if not.
 
         """
         if not self._activity():
             return False
 
-        def abs_file_dict(d):
-            """Return a dict like d, but with keys modified by `abs_file`."""
-            # The call to litems() ensures that the GIL protects the dictionary
-            # iterator against concurrent modifications by tracers running
-            # in other threads. We try three times in case of concurrent
-            # access, hoping to get a clean copy.
-            runtime_err = None
-            for _ in range(3):
-                try:
-                    items = litems(d)
-                except RuntimeError as ex:
-                    runtime_err = ex
-                else:
-                    break
-            else:
-                raise runtime_err       # pylint: disable=raising-bad-type
-
-            return dict((abs_file(k), v) for k, v in items)
-
         if self.branch:
-            covdata.add_arcs(abs_file_dict(self.data))
+            self.covdata.add_arcs(self.mapped_file_dict(self.data))
         else:
-            covdata.add_lines(abs_file_dict(self.data))
-        covdata.add_file_tracers(abs_file_dict(self.file_tracers))
-
-        if self.wtw:
-            # Just a hack, so just hack it.
-            import pprint
-            out_file = "coverage_wtw_{:06}.py".format(os.getpid())
-            with open(out_file, "w") as wtw_out:
-                pprint.pprint(self.contexts, wtw_out)
+            self.covdata.add_lines(self.mapped_file_dict(self.data))
+        self.covdata.add_file_tracers(self.mapped_file_dict(self.file_tracers))
 
         self._clear_data()
         return True