coverage: updated coverage.py to 5.0.3.

Sat, 22 Feb 2020 14:27:42 +0100

author
Detlev Offenbach <detlev@die-offenbachs.de>
date
Sat, 22 Feb 2020 14:27:42 +0100
changeset 7427
362cd1b6f81a
parent 7426
dc171b1d8261
child 7428
27c55a3d0b89
child 7429
6983c461550f

coverage: updated coverage.py to 5.0.3.

docs/changelog file | annotate | diff | comparison | revisions
eric6.e4p file | annotate | diff | comparison | revisions
eric6/DebugClients/Python/DebugClientBase.py file | annotate | diff | comparison | revisions
eric6/DebugClients/Python/coverage/__init__.py file | annotate | diff | comparison | revisions
eric6/DebugClients/Python/coverage/__main__.py file | annotate | diff | comparison | revisions
eric6/DebugClients/Python/coverage/annotate.py file | annotate | diff | comparison | revisions
eric6/DebugClients/Python/coverage/backunittest.py file | annotate | diff | comparison | revisions
eric6/DebugClients/Python/coverage/backward.py file | annotate | diff | comparison | revisions
eric6/DebugClients/Python/coverage/bytecode.py file | annotate | diff | comparison | revisions
eric6/DebugClients/Python/coverage/cmdline.py file | annotate | diff | comparison | revisions
eric6/DebugClients/Python/coverage/collector.py file | annotate | diff | comparison | revisions
eric6/DebugClients/Python/coverage/config.py file | annotate | diff | comparison | revisions
eric6/DebugClients/Python/coverage/context.py file | annotate | diff | comparison | revisions
eric6/DebugClients/Python/coverage/control.py file | annotate | diff | comparison | revisions
eric6/DebugClients/Python/coverage/data.py file | annotate | diff | comparison | revisions
eric6/DebugClients/Python/coverage/debug.py file | annotate | diff | comparison | revisions
eric6/DebugClients/Python/coverage/disposition.py file | annotate | diff | comparison | revisions
eric6/DebugClients/Python/coverage/doc/CHANGES.rst file | annotate | diff | comparison | revisions
eric6/DebugClients/Python/coverage/doc/CONTRIBUTORS.txt file | annotate | diff | comparison | revisions
eric6/DebugClients/Python/coverage/doc/README.rst file | annotate | diff | comparison | revisions
eric6/DebugClients/Python/coverage/env.py file | annotate | diff | comparison | revisions
eric6/DebugClients/Python/coverage/execfile.py file | annotate | diff | comparison | revisions
eric6/DebugClients/Python/coverage/files.py file | annotate | diff | comparison | revisions
eric6/DebugClients/Python/coverage/html.py file | annotate | diff | comparison | revisions
eric6/DebugClients/Python/coverage/inorout.py file | annotate | diff | comparison | revisions
eric6/DebugClients/Python/coverage/jsonreport.py file | annotate | diff | comparison | revisions
eric6/DebugClients/Python/coverage/misc.py file | annotate | diff | comparison | revisions
eric6/DebugClients/Python/coverage/multiproc.py file | annotate | diff | comparison | revisions
eric6/DebugClients/Python/coverage/numbits.py file | annotate | diff | comparison | revisions
eric6/DebugClients/Python/coverage/optional.py file | annotate | diff | comparison | revisions
eric6/DebugClients/Python/coverage/parser.py file | annotate | diff | comparison | revisions
eric6/DebugClients/Python/coverage/phystokens.py file | annotate | diff | comparison | revisions
eric6/DebugClients/Python/coverage/pickle2json.py file | annotate | diff | comparison | revisions
eric6/DebugClients/Python/coverage/plugin.py file | annotate | diff | comparison | revisions
eric6/DebugClients/Python/coverage/plugin_support.py file | annotate | diff | comparison | revisions
eric6/DebugClients/Python/coverage/python.py file | annotate | diff | comparison | revisions
eric6/DebugClients/Python/coverage/pytracer.py file | annotate | diff | comparison | revisions
eric6/DebugClients/Python/coverage/report.py file | annotate | diff | comparison | revisions
eric6/DebugClients/Python/coverage/results.py file | annotate | diff | comparison | revisions
eric6/DebugClients/Python/coverage/sqldata.py file | annotate | diff | comparison | revisions
eric6/DebugClients/Python/coverage/summary.py file | annotate | diff | comparison | revisions
eric6/DebugClients/Python/coverage/templite.py file | annotate | diff | comparison | revisions
eric6/DebugClients/Python/coverage/tomlconfig.py file | annotate | diff | comparison | revisions
eric6/DebugClients/Python/coverage/version.py file | annotate | diff | comparison | revisions
eric6/DebugClients/Python/coverage/xmlreport.py file | annotate | diff | comparison | revisions
--- a/docs/changelog	Wed Feb 19 19:38:36 2020 +0100
+++ b/docs/changelog	Sat Feb 22 14:27:42 2020 +0100
@@ -8,6 +8,8 @@
   -- added support for the '--secure' flag of hg import as of Mercurial 5.3
 - Syntax Checker
   -- updated pyflakes to repository as of 2020-02-03
+- Third Party packages
+  -- updated coverage.py to 5.0.3
 
 Version 20.2:
 - bug fixes
--- a/eric6.e4p	Wed Feb 19 19:38:36 2020 +0100
+++ b/eric6.e4p	Sat Feb 22 14:27:42 2020 +0100
@@ -1,7 +1,7 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <!DOCTYPE Project SYSTEM "Project-6.3.dtd">
 <!-- eric project file for project eric6 -->
-<!-- Copyright (C) 2019 Detlev Offenbach, detlev@die-offenbachs.de -->
+<!-- Copyright (C) 2020 Detlev Offenbach, detlev@die-offenbachs.de -->
 <Project version="6.3">
   <Language>en_US</Language>
   <ProjectWordList>Dictionaries/words.dic</ProjectWordList>
@@ -61,26 +61,33 @@
     <Source>eric6/DebugClients/Python/coverage/cmdline.py</Source>
     <Source>eric6/DebugClients/Python/coverage/collector.py</Source>
     <Source>eric6/DebugClients/Python/coverage/config.py</Source>
+    <Source>eric6/DebugClients/Python/coverage/context.py</Source>
     <Source>eric6/DebugClients/Python/coverage/control.py</Source>
     <Source>eric6/DebugClients/Python/coverage/data.py</Source>
     <Source>eric6/DebugClients/Python/coverage/debug.py</Source>
+    <Source>eric6/DebugClients/Python/coverage/disposition.py</Source>
     <Source>eric6/DebugClients/Python/coverage/env.py</Source>
     <Source>eric6/DebugClients/Python/coverage/execfile.py</Source>
     <Source>eric6/DebugClients/Python/coverage/files.py</Source>
     <Source>eric6/DebugClients/Python/coverage/html.py</Source>
+    <Source>eric6/DebugClients/Python/coverage/inorout.py</Source>
+    <Source>eric6/DebugClients/Python/coverage/jsonreport.py</Source>
     <Source>eric6/DebugClients/Python/coverage/misc.py</Source>
     <Source>eric6/DebugClients/Python/coverage/multiproc.py</Source>
+    <Source>eric6/DebugClients/Python/coverage/numbits.py</Source>
+    <Source>eric6/DebugClients/Python/coverage/optional.py</Source>
     <Source>eric6/DebugClients/Python/coverage/parser.py</Source>
     <Source>eric6/DebugClients/Python/coverage/phystokens.py</Source>
-    <Source>eric6/DebugClients/Python/coverage/pickle2json.py</Source>
     <Source>eric6/DebugClients/Python/coverage/plugin.py</Source>
     <Source>eric6/DebugClients/Python/coverage/plugin_support.py</Source>
     <Source>eric6/DebugClients/Python/coverage/python.py</Source>
     <Source>eric6/DebugClients/Python/coverage/pytracer.py</Source>
     <Source>eric6/DebugClients/Python/coverage/report.py</Source>
     <Source>eric6/DebugClients/Python/coverage/results.py</Source>
+    <Source>eric6/DebugClients/Python/coverage/sqldata.py</Source>
     <Source>eric6/DebugClients/Python/coverage/summary.py</Source>
     <Source>eric6/DebugClients/Python/coverage/templite.py</Source>
+    <Source>eric6/DebugClients/Python/coverage/tomlconfig.py</Source>
     <Source>eric6/DebugClients/Python/coverage/version.py</Source>
     <Source>eric6/DebugClients/Python/coverage/xmlreport.py</Source>
     <Source>eric6/DebugClients/Python/eric6dbgstub.py</Source>
--- a/eric6/DebugClients/Python/DebugClientBase.py	Wed Feb 19 19:38:36 2020 +0100
+++ b/eric6/DebugClients/Python/DebugClientBase.py	Sat Feb 22 14:27:42 2020 +0100
@@ -518,7 +518,7 @@
                 self.mainThread.run(code, self.debugMod.__dict__, debug=False)
 
         elif method == "RequestCoverage":
-            from coverage import coverage
+            from coverage import Coverage
             sys.argv = []
             self.__setCoding(params["filename"])
             sys.argv.append(params["filename"])
@@ -535,7 +535,7 @@
             self.__interceptSignals()
             
             # generate a coverage object
-            self.cover = coverage(
+            self.cover = Coverage(
                 auto_data=True,
                 data_file="{0}.coverage".format(
                     os.path.splitext(sys.argv[0])[0]))
@@ -906,8 +906,8 @@
             
             # generate a coverage object
             if params["coverage"]:
-                from coverage import coverage
-                self.cover = coverage(
+                from coverage import Coverage
+                self.cover = Coverage(
                     auto_data=True,
                     data_file="{0}.coverage".format(
                         os.path.splitext(params["coveragefile"])[0]))
--- a/eric6/DebugClients/Python/coverage/__init__.py	Wed Feb 19 19:38:36 2020 +0100
+++ b/eric6/DebugClients/Python/coverage/__init__.py	Sat Feb 22 14:27:42 2020 +0100
@@ -1,5 +1,5 @@
 # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
 
 """Code coverage measurement for Python.
 
@@ -8,11 +8,12 @@
 
 """
 
+import sys
+
 from coverage.version import __version__, __url__, version_info
 
 from coverage.control import Coverage, process_startup
 from coverage.data import CoverageData
-from coverage.debug import enable_aspectlib_maybe
 from coverage.misc import CoverageException
 from coverage.plugin import CoveragePlugin, FileTracer, FileReporter
 from coverage.pytracer import PyTracer
@@ -20,19 +21,15 @@
 # Backward compatibility.
 coverage = Coverage
 
-# Possibly enable aspectlib to debug our execution.
-enable_aspectlib_maybe()
-
 # On Windows, we encode and decode deep enough that something goes wrong and
 # the encodings.utf_8 module is loaded and then unloaded, I don't know why.
 # Adding a reference here prevents it from being unloaded.  Yuk.
-import encodings.utf_8
+import encodings.utf_8      # pylint: disable=wrong-import-position, wrong-import-order
 
 # Because of the "from coverage.control import fooey" lines at the top of the
 # file, there's an entry for coverage.coverage in sys.modules, mapped to None.
 # This makes some inspection tools (like pydoc) unable to find the class
 # coverage.coverage.  So remove that entry.
-import sys
 try:
     del sys.modules['coverage.coverage']
 except KeyError:
--- a/eric6/DebugClients/Python/coverage/__main__.py	Wed Feb 19 19:38:36 2020 +0100
+++ b/eric6/DebugClients/Python/coverage/__main__.py	Sat Feb 22 14:27:42 2020 +0100
@@ -1,5 +1,5 @@
 # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
 
 """Coverage.py's main entry point."""
 
--- a/eric6/DebugClients/Python/coverage/annotate.py	Wed Feb 19 19:38:36 2020 +0100
+++ b/eric6/DebugClients/Python/coverage/annotate.py	Sat Feb 22 14:27:42 2020 +0100
@@ -1,5 +1,5 @@
 # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
 
 """Source file annotation for coverage.py."""
 
@@ -8,13 +8,13 @@
 import re
 
 from coverage.files import flat_rootname
-from coverage.misc import isolate_module
-from coverage.report import Reporter
+from coverage.misc import ensure_dir, isolate_module
+from coverage.report import get_analysis_to_report
 
 os = isolate_module(os)
 
 
-class AnnotateReporter(Reporter):
+class AnnotateReporter(object):
     """Generate annotated source files showing line coverage.
 
     This reporter creates annotated copies of the measured source files. Each
@@ -36,8 +36,9 @@
 
     """
 
-    def __init__(self, coverage, config):
-        super(AnnotateReporter, self).__init__(coverage, config)
+    def __init__(self, coverage):
+        self.coverage = coverage
+        self.config = self.coverage.config
         self.directory = None
 
     blank_re = re.compile(r"\s*(#|$)")
@@ -49,7 +50,10 @@
         See `coverage.report()` for arguments.
 
         """
-        self.report_files(self.annotate_file, morfs, directory)
+        self.directory = directory
+        self.coverage.get_data()
+        for fr, analysis in get_analysis_to_report(self.coverage, morfs):
+            self.annotate_file(fr, analysis)
 
     def annotate_file(self, fr, analysis):
         """Annotate a single file.
@@ -62,6 +66,7 @@
         excluded = sorted(analysis.excluded)
 
         if self.directory:
+            ensure_dir(self.directory)
             dest_file = os.path.join(self.directory, flat_rootname(fr.relative_filename()))
             if dest_file.endswith("_py"):
                 dest_file = dest_file[:-3] + ".py"
--- a/eric6/DebugClients/Python/coverage/backunittest.py	Wed Feb 19 19:38:36 2020 +0100
+++ b/eric6/DebugClients/Python/coverage/backunittest.py	Sat Feb 22 14:27:42 2020 +0100
@@ -1,14 +1,9 @@
 # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
 
 """Implementations of unittest features from the future."""
 
-# Use unittest2 if it's available, otherwise unittest.  This gives us
-# back-ported features for 2.6.
-try:
-    import unittest2 as unittest
-except ImportError:
-    import unittest
+import unittest
 
 
 def unittest_has(method):
@@ -23,15 +18,11 @@
     `unittest` doesn't have them.
 
     """
-    # pylint: disable=missing-docstring
+    # pylint: disable=arguments-differ, deprecated-method
 
-    # Many Pythons have this method defined.  But PyPy3 has a bug with it
-    # somehow (https://bitbucket.org/pypy/pypy/issues/2092), so always use our
-    # own implementation that works everywhere, at least for the ways we're
-    # calling it.
-    def assertCountEqual(self, s1, s2):
-        """Assert these have the same elements, regardless of order."""
-        self.assertEqual(sorted(s1), sorted(s2))
+    if not unittest_has('assertCountEqual'):
+        def assertCountEqual(self, *args, **kwargs):
+            return self.assertItemsEqual(*args, **kwargs)
 
     if not unittest_has('assertRaisesRegex'):
         def assertRaisesRegex(self, *args, **kwargs):
--- a/eric6/DebugClients/Python/coverage/backward.py	Wed Feb 19 19:38:36 2020 +0100
+++ b/eric6/DebugClients/Python/coverage/backward.py	Sat Feb 22 14:27:42 2020 +0100
@@ -1,11 +1,12 @@
 # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
 
 """Add things to old Pythons so I can pretend they are newer."""
 
-# This file does tricky stuff, so disable a pylint warning.
+# This file's purpose is to provide modules to be imported from here.
 # pylint: disable=unused-import
 
+import os
 import sys
 
 from coverage import env
@@ -38,18 +39,32 @@
 except NameError:
     unicode_class = str
 
-# Where do pickles come from?
-try:
-    import cPickle as pickle
-except ImportError:
-    import pickle
-
 # range or xrange?
 try:
     range = xrange      # pylint: disable=redefined-builtin
 except NameError:
     range = range
 
+try:
+    from itertools import zip_longest
+except ImportError:
+    from itertools import izip_longest as zip_longest
+
+# Where do we get the thread id from?
+try:
+    from thread import get_ident as get_thread_id
+except ImportError:
+    from threading import get_ident as get_thread_id
+
+try:
+    os.PathLike
+except AttributeError:
+    # This is Python 2 and 3
+    path_types = (bytes, string_class, unicode_class)
+else:
+    # 3.6+
+    path_types = (bytes, str, os.PathLike)
+
 # shlex.quote is new, but there's an undocumented implementation in "pipes",
 # who knew!?
 try:
@@ -59,6 +74,11 @@
     # in Python versions earlier than 3.3.
     from pipes import quote as shlex_quote
 
+try:
+    import reprlib
+except ImportError:
+    import repr as reprlib
+
 # A function to iterate listlessly over a dict's items, and one to get the
 # items as a list.
 try:
@@ -101,10 +121,18 @@
         """Convert string `s` to bytes."""
         return s.encode('utf8')
 
+    def to_string(b):
+        """Convert bytes `b` to string."""
+        return b.decode('utf8')
+
     def binary_bytes(byte_values):
         """Produce a byte string with the ints from `byte_values`."""
         return bytes(byte_values)
 
+    def byte_to_int(byte):
+        """Turn a byte indexed from a bytes object into an int."""
+        return byte
+
     def bytes_to_ints(bytes_value):
         """Turn a bytes object into a sequence of ints."""
         # In Python 3, iterating bytes gives ints.
@@ -115,10 +143,18 @@
         """Convert string `s` to bytes (no-op in 2.x)."""
         return s
 
+    def to_string(b):
+        """Convert bytes `b` to string."""
+        return b
+
     def binary_bytes(byte_values):
         """Produce a byte string with the ints from `byte_values`."""
         return "".join(chr(b) for b in byte_values)
 
+    def byte_to_int(byte):
+        """Turn a byte indexed from a bytes object into an int."""
+        return ord(byte)
+
     def bytes_to_ints(bytes_value):
         """Turn a bytes object into a sequence of ints."""
         for byte in bytes_value:
@@ -155,6 +191,32 @@
     PYC_MAGIC_NUMBER = imp.get_magic()
 
 
+def code_object(fn):
+    """Get the code object from a function."""
+    try:
+        return fn.func_code
+    except AttributeError:
+        return fn.__code__
+
+
+try:
+    from types import SimpleNamespace
+except ImportError:
+    # The code from https://docs.python.org/3/library/types.html#types.SimpleNamespace
+    class SimpleNamespace:
+        """Python implementation of SimpleNamespace, for Python 2."""
+        def __init__(self, **kwargs):
+            self.__dict__.update(kwargs)
+
+        def __repr__(self):
+            keys = sorted(self.__dict__)
+            items = ("{}={!r}".format(k, self.__dict__[k]) for k in keys)
+            return "{}({})".format(type(self).__name__, ", ".join(items))
+
+        def __eq__(self, other):
+            return self.__dict__ == other.__dict__
+
+
 def invalidate_import_caches():
     """Invalidate any import caches that may or may not exist."""
     if importlib and hasattr(importlib, "invalidate_caches"):
@@ -177,6 +239,7 @@
     if modfile is None:
         modfile = modname + '.py'
     if SourceFileLoader:
+        # pylint: disable=no-value-for-parameter, deprecated-method
         mod = SourceFileLoader(modname, modfile).load_module()
     else:
         for suff in imp.get_suffixes():                 # pragma: part covered
--- a/eric6/DebugClients/Python/coverage/bytecode.py	Wed Feb 19 19:38:36 2020 +0100
+++ b/eric6/DebugClients/Python/coverage/bytecode.py	Sat Feb 22 14:27:42 2020 +0100
@@ -1,22 +1,19 @@
 # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
 
 """Bytecode manipulation for coverage.py"""
 
 import types
 
 
-class CodeObjects(object):
+def code_objects(code):
     """Iterate over all the code objects in `code`."""
-    def __init__(self, code):
-        self.stack = [code]
-
-    def __iter__(self):
-        while self.stack:
-            # We're going to return the code object on the stack, but first
-            # push its children for later returning.
-            code = self.stack.pop()
-            for c in code.co_consts:
-                if isinstance(c, types.CodeType):
-                    self.stack.append(c)
-            yield code
+    stack = [code]
+    while stack:
+        # We're going to return the code object on the stack, but first
+        # push its children for later returning.
+        code = stack.pop()
+        for c in code.co_consts:
+            if isinstance(c, types.CodeType):
+                stack.append(c)
+        yield code
--- a/eric6/DebugClients/Python/coverage/cmdline.py	Wed Feb 19 19:38:36 2020 +0100
+++ b/eric6/DebugClients/Python/coverage/cmdline.py	Sat Feb 22 14:27:42 2020 +0100
@@ -1,5 +1,5 @@
 # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
 
 """Command-line support for coverage.py."""
 
@@ -8,15 +8,19 @@
 import glob
 import optparse
 import os.path
+import shlex
 import sys
 import textwrap
 import traceback
 
+import coverage
+from coverage import Coverage
 from coverage import env
 from coverage.collector import CTracer
+from coverage.data import line_counts
 from coverage.debug import info_formatter, info_header
-from coverage.execfile import run_python_file, run_python_module
-from coverage.misc import BaseCoverageException, ExceptionDuringRun, NoSource
+from coverage.execfile import PyRunner
+from coverage.misc import BaseCoverageException, ExceptionDuringRun, NoSource, output_encoding
 from coverage.results import should_fail_under
 
 
@@ -42,9 +46,13 @@
             "Valid values are: %s."
         ) % ", ".join(CONCURRENCY_CHOICES),
     )
+    context = optparse.make_option(
+        '', '--context', action='store', metavar="LABEL",
+        help="The context label to record for this coverage run.",
+    )
     debug = optparse.make_option(
         '', '--debug', action='store', metavar="OPTS",
-        help="Debug options, separated by commas",
+        help="Debug options, separated by commas. [env: COVERAGE_DEBUG]",
     )
     directory = optparse.make_option(
         '-d', '--directory', action='store', metavar="DIR",
@@ -85,6 +93,14 @@
         '--skip-covered', action='store_true',
         help="Skip files with 100% coverage.",
     )
+    skip_empty = optparse.make_option(
+        '--skip-empty', action='store_true',
+        help="Skip files with no code.",
+    )
+    show_contexts = optparse.make_option(
+        '--show-contexts', action='store_true',
+        help="Show contexts for covered lines.",
+    )
     omit = optparse.make_option(
         '', '--omit', action='store',
         metavar="PAT1,PAT2,...",
@@ -93,11 +109,28 @@
             "Accepts shell-style wildcards, which must be quoted."
         ),
     )
+    contexts = optparse.make_option(
+        '', '--contexts', action='store',
+        metavar="REGEX1,REGEX2,...",
+        help=(
+            "Only display data from lines covered in the given contexts. "
+            "Accepts Python regexes, which must be quoted."
+        ),
+    )
     output_xml = optparse.make_option(
         '-o', '', action='store', dest="outfile",
         metavar="OUTFILE",
         help="Write the XML report to this file. Defaults to 'coverage.xml'",
     )
+    output_json = optparse.make_option(
+        '-o', '', action='store', dest="outfile",
+        metavar="OUTFILE",
+        help="Write the JSON report to this file. Defaults to 'coverage.json'",
+    )
+    json_pretty_print = optparse.make_option(
+        '', '--pretty-print', action='store_true',
+        help="Format the JSON for human readers.",
+    )
     parallel_mode = optparse.make_option(
         '-p', '--parallel-mode', action='store_true',
         help=(
@@ -116,8 +149,9 @@
     rcfile = optparse.make_option(
         '', '--rcfile', action='store',
         help=(
-            "Specify configuration file.  "
-            "By default '.coveragerc', 'setup.cfg' and 'tox.ini' are tried."
+            "Specify configuration file. "
+            "By default '.coveragerc', 'setup.cfg', 'tox.ini', and "
+            "'pyproject.toml' are tried. [env: COVERAGE_RCFILE]"
         ),
     )
     source = optparse.make_option(
@@ -127,7 +161,7 @@
     timid = optparse.make_option(
         '', '--timid', action='store_true',
         help=(
-            "Use a simpler but slower trace method.  Try this if you get "
+            "Use a simpler but slower trace method. Try this if you get "
             "seemingly impossible results!"
         ),
     )
@@ -158,6 +192,7 @@
             append=None,
             branch=None,
             concurrency=None,
+            context=None,
             debug=None,
             directory=None,
             fail_under=None,
@@ -166,11 +201,14 @@
             include=None,
             module=None,
             omit=None,
+            contexts=None,
             parallel_mode=None,
             pylib=None,
             rcfile=True,
             show_missing=None,
             skip_covered=None,
+            skip_empty=None,
+            show_contexts=None,
             source=None,
             timid=None,
             title=None,
@@ -178,11 +216,6 @@
             )
 
         self.disable_interspersed_args()
-        self.help_fn = self.help_noop
-
-    def help_noop(self, error=None, topic=None, parser=None):
-        """No-op help function."""
-        pass
 
     class OptionParserError(Exception):
         """Used to stop the optparse error handler ending the process."""
@@ -195,15 +228,14 @@
 
         """
         try:
-            options, args = \
-                super(CoverageOptionParser, self).parse_args(args, options)
+            options, args = super(CoverageOptionParser, self).parse_args(args, options)
         except self.OptionParserError:
             return False, None, None
         return True, options, args
 
     def error(self, msg):
         """Override optparse.error so sys.exit doesn't get called."""
-        self.help_fn(msg)
+        show_help(msg)
         raise self.OptionParserError
 
 
@@ -320,13 +352,16 @@
     'html': CmdOptionParser(
         "html",
         [
+            Opts.contexts,
             Opts.directory,
             Opts.fail_under,
             Opts.ignore_errors,
             Opts.include,
             Opts.omit,
+            Opts.show_contexts,
+            Opts.skip_covered,
+            Opts.skip_empty,
             Opts.title,
-            Opts.skip_covered,
             ] + GLOBAL_ARGS,
         usage="[options] [modules]",
         description=(
@@ -336,15 +371,33 @@
         ),
     ),
 
+    'json': CmdOptionParser(
+        "json",
+        [
+            Opts.contexts,
+            Opts.fail_under,
+            Opts.ignore_errors,
+            Opts.include,
+            Opts.omit,
+            Opts.output_json,
+            Opts.json_pretty_print,
+            Opts.show_contexts,
+            ] + GLOBAL_ARGS,
+        usage="[options] [modules]",
+        description="Generate a JSON report of coverage results."
+    ),
+
     'report': CmdOptionParser(
         "report",
         [
+            Opts.contexts,
             Opts.fail_under,
             Opts.ignore_errors,
             Opts.include,
             Opts.omit,
             Opts.show_missing,
             Opts.skip_covered,
+            Opts.skip_empty,
             ] + GLOBAL_ARGS,
         usage="[options] [modules]",
         description="Report coverage statistics on modules."
@@ -356,6 +409,7 @@
             Opts.append,
             Opts.branch,
             Opts.concurrency,
+            Opts.context,
             Opts.include,
             Opts.module,
             Opts.omit,
@@ -383,45 +437,57 @@
 }
 
 
+def show_help(error=None, topic=None, parser=None):
+    """Display an error message, or the named topic."""
+    assert error or topic or parser
+
+    program_path = sys.argv[0]
+    if program_path.endswith(os.path.sep + '__main__.py'):
+        # The path is the main module of a package; get that path instead.
+        program_path = os.path.dirname(program_path)
+    program_name = os.path.basename(program_path)
+    if env.WINDOWS:
+        # entry_points={'console_scripts':...} on Windows makes files
+        # called coverage.exe, coverage3.exe, and coverage-3.5.exe. These
+        # invoke coverage-script.py, coverage3-script.py, and
+        # coverage-3.5-script.py.  argv[0] is the .py file, but we want to
+        # get back to the original form.
+        auto_suffix = "-script.py"
+        if program_name.endswith(auto_suffix):
+            program_name = program_name[:-len(auto_suffix)]
+
+    help_params = dict(coverage.__dict__)
+    help_params['program_name'] = program_name
+    if CTracer is not None:
+        help_params['extension_modifier'] = 'with C extension'
+    else:
+        help_params['extension_modifier'] = 'without C extension'
+
+    if error:
+        print(error, file=sys.stderr)
+        print("Use '%s help' for help." % (program_name,), file=sys.stderr)
+    elif parser:
+        print(parser.format_help().strip())
+        print()
+    else:
+        help_msg = textwrap.dedent(HELP_TOPICS.get(topic, '')).strip()
+        if help_msg:
+            print(help_msg.format(**help_params))
+        else:
+            print("Don't know topic %r" % topic)
+    print("Full documentation is at {__url__}".format(**help_params))
+
+
 OK, ERR, FAIL_UNDER = 0, 1, 2
 
 
 class CoverageScript(object):
     """The command-line interface to coverage.py."""
 
-    def __init__(self, _covpkg=None, _run_python_file=None,
-                 _run_python_module=None, _help_fn=None, _path_exists=None):
-        # _covpkg is for dependency injection, so we can test this code.
-        if _covpkg:
-            self.covpkg = _covpkg
-        else:
-            import coverage
-            self.covpkg = coverage
-
-        # For dependency injection:
-        self.run_python_file = _run_python_file or run_python_file
-        self.run_python_module = _run_python_module or run_python_module
-        self.help_fn = _help_fn or self.help
-        self.path_exists = _path_exists or os.path.exists
+    def __init__(self):
         self.global_option = False
-
         self.coverage = None
 
-        program_path = sys.argv[0]
-        if program_path.endswith(os.path.sep + '__main__.py'):
-            # The path is the main module of a package; get that path instead.
-            program_path = os.path.dirname(program_path)
-        self.program_name = os.path.basename(program_path)
-        if env.WINDOWS:
-            # entry_points={'console_scripts':...} on Windows makes files
-            # called coverage.exe, coverage3.exe, and coverage-3.5.exe. These
-            # invoke coverage-script.py, coverage3-script.py, and
-            # coverage-3.5-script.py.  argv[0] is the .py file, but we want to
-            # get back to the original form.
-            auto_suffix = "-script.py"
-            if self.program_name.endswith(auto_suffix):
-                self.program_name = self.program_name[:-len(auto_suffix)]
-
     def command_line(self, argv):
         """The bulk of the command line interface to coverage.py.
 
@@ -432,7 +498,7 @@
         """
         # Collect the command-line options.
         if not argv:
-            self.help_fn(topic='minimum_help')
+            show_help(topic='minimum_help')
             return OK
 
         # The command syntax we parse depends on the first argument.  Global
@@ -443,11 +509,10 @@
         else:
             parser = CMDS.get(argv[0])
             if not parser:
-                self.help_fn("Unknown command: '%s'" % argv[0])
+                show_help("Unknown command: '%s'" % argv[0])
                 return ERR
             argv = argv[1:]
 
-        parser.help_fn = self.help_fn
         ok, options, args = parser.parse_args_ok(argv)
         if not ok:
             return ERR
@@ -456,18 +521,15 @@
         if self.do_help(options, args, parser):
             return OK
 
-        # We need to be able to import from the current directory, because
-        # plugins may try to, for example, to read Django settings.
-        sys.path[0] = ''
-
         # Listify the list options.
         source = unshell_list(options.source)
         omit = unshell_list(options.omit)
         include = unshell_list(options.include)
         debug = unshell_list(options.debug)
+        contexts = unshell_list(options.contexts)
 
         # Do something.
-        self.coverage = self.covpkg.Coverage(
+        self.coverage = Coverage(
             data_suffix=options.parallel_mode,
             cover_pylib=options.pylib,
             timid=options.timid,
@@ -478,6 +540,8 @@
             include=include,
             debug=debug,
             concurrency=options.concurrency,
+            check_preimported=True,
+            context=options.context,
             )
 
         if options.action == "debug":
@@ -504,25 +568,45 @@
             ignore_errors=options.ignore_errors,
             omit=omit,
             include=include,
+            contexts=contexts,
             )
 
+        # We need to be able to import from the current directory, because
+        # plugins may try, for example, to read Django settings.
+        sys.path.insert(0, '')
+
         self.coverage.load()
 
         total = None
         if options.action == "report":
             total = self.coverage.report(
                 show_missing=options.show_missing,
-                skip_covered=options.skip_covered, **report_args)
+                skip_covered=options.skip_covered,
+                skip_empty=options.skip_empty,
+                **report_args
+                )
         elif options.action == "annotate":
-            self.coverage.annotate(
-                directory=options.directory, **report_args)
+            self.coverage.annotate(directory=options.directory, **report_args)
         elif options.action == "html":
             total = self.coverage.html_report(
-                directory=options.directory, title=options.title,
-                skip_covered=options.skip_covered, **report_args)
+                directory=options.directory,
+                title=options.title,
+                skip_covered=options.skip_covered,
+                skip_empty=options.skip_empty,
+                show_contexts=options.show_contexts,
+                **report_args
+                )
         elif options.action == "xml":
             outfile = options.outfile
             total = self.coverage.xml_report(outfile=outfile, **report_args)
+        elif options.action == "json":
+            outfile = options.outfile
+            total = self.coverage.json_report(
+                outfile=outfile,
+                pretty_print=options.pretty_print,
+                show_contexts=options.show_contexts,
+                **report_args
+            )
 
         if total is not None:
             # Apply the command line fail-under options, and then use the config
@@ -537,27 +621,6 @@
 
         return OK
 
-    def help(self, error=None, topic=None, parser=None):
-        """Display an error message, or the named topic."""
-        assert error or topic or parser
-        if error:
-            print(error, file=sys.stderr)
-            print("Use '%s help' for help." % (self.program_name,), file=sys.stderr)
-        elif parser:
-            print(parser.format_help().strip())
-        else:
-            help_params = dict(self.covpkg.__dict__)
-            help_params['program_name'] = self.program_name
-            if CTracer is not None:
-                help_params['extension_modifier'] = 'with C extension'
-            else:
-                help_params['extension_modifier'] = 'without C extension'
-            help_msg = textwrap.dedent(HELP_TOPICS.get(topic, '')).strip()
-            if help_msg:
-                print(help_msg.format(**help_params))
-            else:
-                print("Don't know topic %r" % topic)
-
     def do_help(self, options, args, parser):
         """Deal with help requests.
 
@@ -567,9 +630,9 @@
         # Handle help.
         if options.help:
             if self.global_option:
-                self.help_fn(topic='help')
+                show_help(topic='help')
             else:
-                self.help_fn(parser=parser)
+                show_help(parser=parser)
             return True
 
         if options.action == "help":
@@ -577,16 +640,16 @@
                 for a in args:
                     parser = CMDS.get(a)
                     if parser:
-                        self.help_fn(parser=parser)
+                        show_help(parser=parser)
                     else:
-                        self.help_fn(topic=a)
+                        show_help(topic=a)
             else:
-                self.help_fn(topic='help')
+                show_help(topic='help')
             return True
 
         # Handle version.
         if options.version:
-            self.help_fn(topic='version')
+            show_help(topic='version')
             return True
 
         return False
@@ -595,11 +658,22 @@
         """Implementation of 'coverage run'."""
 
         if not args:
-            self.help_fn("Nothing to do.")
+            if options.module:
+                # Specified -m with nothing else.
+                show_help("No module specified for -m")
+                return ERR
+            command_line = self.coverage.get_option("run:command_line")
+            if command_line is not None:
+                args = shlex.split(command_line)
+                if args and args[0] == "-m":
+                    options.module = True
+                    args = args[1:]
+        if not args:
+            show_help("Nothing to do.")
             return ERR
 
         if options.append and self.coverage.get_option("run:parallel"):
-            self.help_fn("Can't append to data files in parallel mode.")
+            show_help("Can't append to data files in parallel mode.")
             return ERR
 
         if options.concurrency == "multiprocessing":
@@ -609,35 +683,30 @@
                 # As it happens, all of these options have no default, meaning
                 # they will be None if they have not been specified.
                 if getattr(options, opt_name) is not None:
-                    self.help_fn(
-                        "Options affecting multiprocessing must be specified "
-                        "in a configuration file."
+                    show_help(
+                        "Options affecting multiprocessing must only be specified "
+                        "in a configuration file.\n"
+                        "Remove --{} from the command line.".format(opt_name)
                     )
                     return ERR
 
-        if not self.coverage.get_option("run:parallel"):
-            if not options.append:
-                self.coverage.erase()
+        runner = PyRunner(args, as_module=bool(options.module))
+        runner.prepare()
+
+        if options.append:
+            self.coverage.load()
 
         # Run the script.
         self.coverage.start()
         code_ran = True
         try:
-            if options.module:
-                self.run_python_module(args[0], args)
-            else:
-                filename = args[0]
-                self.run_python_file(filename, args)
+            runner.run()
         except NoSource:
             code_ran = False
             raise
         finally:
             self.coverage.stop()
             if code_ran:
-                if options.append:
-                    data_file = self.coverage.get_option("run:data_file")
-                    if self.path_exists(data_file):
-                        self.coverage.combine(data_paths=[data_file])
                 self.coverage.save()
 
         return OK
@@ -646,7 +715,7 @@
         """Implementation of 'coverage debug'."""
 
         if not args:
-            self.help_fn("What information would you like: config, data, sys?")
+            show_help("What information would you like: config, data, sys, premain?")
             return ERR
 
         for info in args:
@@ -657,12 +726,12 @@
                     print(" %s" % line)
             elif info == 'data':
                 self.coverage.load()
-                data = self.coverage.data
+                data = self.coverage.get_data()
                 print(info_header("data"))
-                print("path: %s" % self.coverage.data_files.filename)
+                print("path: %s" % self.coverage.get_data().data_filename())
                 if data:
                     print("has_arcs: %r" % data.has_arcs())
-                    summary = data.line_counts(fullpath=True)
+                    summary = line_counts(data, fullpath=True)
                     filenames = sorted(summary.keys())
                     print("\n%d files:" % len(filenames))
                     for f in filenames:
@@ -678,8 +747,12 @@
                 config_info = self.coverage.config.__dict__.items()
                 for line in info_formatter(config_info):
                     print(" %s" % line)
+            elif info == "premain":
+                print(info_header("premain"))
+                from coverage.debug import short_stack
+                print(short_stack())
             else:
-                self.help_fn("Don't know what you mean by %r" % info)
+                show_help("Don't know what you mean by %r" % info)
                 return ERR
 
         return OK
@@ -725,12 +798,12 @@
             erase       Erase previously collected coverage data.
             help        Get help on using coverage.py.
             html        Create an HTML report.
+            json        Create a JSON report of coverage results.
             report      Report coverage stats on modules.
             run         Run a Python program and measure code execution.
             xml         Create an XML report of coverage results.
 
         Use "{program_name} help <command>" for detailed help on any command.
-        For full documentation, see {__url__}
     """,
 
     'minimum_help': """\
@@ -739,7 +812,6 @@
 
     'version': """\
         Coverage.py, version {__version__} {extension_modifier}
-        Documentation at {__url__}
     """,
 }
 
@@ -762,7 +834,10 @@
         status = ERR
     except BaseCoverageException as err:
         # A controlled error inside coverage.py: print the message to the user.
-        print(err)
+        msg = err.args[0]
+        if env.PY2:
+            msg = msg.encode(output_encoding())
+        print(msg)
         status = ERR
     except SystemExit as err:
         # The user called `sys.exit()`.  Exit with their argument, if any.
@@ -771,3 +846,22 @@
         else:
             status = None
     return status
+
+# Profiling using ox_profile.  Install it from GitHub:
+#   pip install git+https://github.com/emin63/ox_profile.git
+#
+# $set_env.py: COVERAGE_PROFILE - Set to use ox_profile.
+_profile = os.environ.get("COVERAGE_PROFILE", "")
+if _profile:                                                # pragma: debugging
+    from ox_profile.core.launchers import SimpleLauncher    # pylint: disable=import-error
+    original_main = main
+
+    def main(argv=None):                                    # pylint: disable=function-redefined
+        """A wrapper around main that profiles."""
+        try:
+            profiler = SimpleLauncher.launch()
+            return original_main(argv)
+        finally:
+            data, _ = profiler.query(re_filter='coverage', max_records=100)
+            print(profiler.show(query=data, limit=100, sep='', col=''))
+            profiler.cancel()
--- a/eric6/DebugClients/Python/coverage/collector.py	Wed Feb 19 19:38:36 2020 +0100
+++ b/eric6/DebugClients/Python/coverage/collector.py	Sat Feb 22 14:27:42 2020 +0100
@@ -1,5 +1,5 @@
 # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
 
 """Raw data collector for coverage.py."""
 
@@ -9,7 +9,7 @@
 from coverage import env
 from coverage.backward import litems, range     # pylint: disable=redefined-builtin
 from coverage.debug import short_stack
-from coverage.files import abs_file
+from coverage.disposition import FileDisposition
 from coverage.misc import CoverageException, isolate_module
 from coverage.pytracer import PyTracer
 
@@ -33,19 +33,6 @@
     CTracer = None
 
 
-class FileDisposition(object):
-    """A simple value type for recording what to do with a file."""
-    pass
-
-
-def should_start_context(frame):
-    """Who-Tests-What hack: Determine whether this frame begins a new who-context."""
-    fn_name = frame.f_code.co_name
-    if fn_name.startswith("test"):
-        return fn_name
-    return None
-
-
 class Collector(object):
     """Collects trace data.
 
@@ -70,7 +57,10 @@
     # The concurrency settings we support here.
     SUPPORTED_CONCURRENCIES = set(["greenlet", "eventlet", "gevent", "thread"])
 
-    def __init__(self, should_trace, check_include, timid, branch, warn, concurrency):
+    def __init__(
+        self, should_trace, check_include, should_start_context, file_mapper,
+        timid, branch, warn, concurrency,
+    ):
         """Create a collector.
 
         `should_trace` is a function, taking a file name and a frame, and
@@ -79,6 +69,15 @@
         `check_include` is a function taking a file name and a frame. It returns
         a boolean: True if the file should be traced, False if not.
 
+        `should_start_context` is a function taking a frame, and returning a
+        string. If the frame should be the start of a new context, the string
+        is the new context. If the frame should not be the start of a new
+        context, return None.
+
+        `file_mapper` is a function taking a filename, and returning a Unicode
+        filename.  The result is the name that will be recorded in the data
+        file.
+
         If `timid` is true, then a slower simpler trace function will be
         used.  This is important for some environments where manipulation of
         tracing functions make the faster more sophisticated trace function not
@@ -100,13 +99,19 @@
         """
         self.should_trace = should_trace
         self.check_include = check_include
+        self.should_start_context = should_start_context
+        self.file_mapper = file_mapper
         self.warn = warn
         self.branch = branch
         self.threading = None
+        self.covdata = None
+
+        self.static_context = None
 
         self.origin = short_stack()
 
         self.concur_id_func = None
+        self.mapped_file_cache = {}
 
         # We can handle a few concurrency options here, but only one at a time.
         these_concurrencies = self.SUPPORTED_CONCURRENCIES.intersection(concurrency)
@@ -139,10 +144,6 @@
                 )
             )
 
-        # Who-Tests-What is just a hack at the moment, so turn it on with an
-        # environment variable.
-        self.wtw = int(os.getenv('COVERAGE_WTW', 0))
-
         self.reset()
 
         if timid:
@@ -163,13 +164,23 @@
     def __repr__(self):
         return "<Collector at 0x%x: %s>" % (id(self), self.tracer_name())
 
+    def use_data(self, covdata, context):
+        """Use `covdata` for recording data."""
+        self.covdata = covdata
+        self.static_context = context
+        self.covdata.set_context(self.static_context)
+
     def tracer_name(self):
         """Return the class name of the tracer we're using."""
         return self._trace_class.__name__
 
     def _clear_data(self):
         """Clear out existing data, but stay ready for more collection."""
-        self.data.clear()
+        # We used to use self.data.clear(), but that would remove filename
+        # keys and data values that were still in use higher up the stack
+        # when we are called as part of switch_context.
+        for d in self.data.values():
+            d.clear()
 
         for tracer in self.tracers:
             tracer.reset_activity()
@@ -181,10 +192,6 @@
         # pairs as keys (if branch coverage).
         self.data = {}
 
-        # A dict mapping contexts to data dictionaries.
-        self.contexts = {}
-        self.contexts[None] = self.data
-
         # A dictionary mapping file names to file tracer plugin names that will
         # handle them.
         self.file_tracers = {}
@@ -246,11 +253,9 @@
             tracer.threading = self.threading
         if hasattr(tracer, 'check_include'):
             tracer.check_include = self.check_include
-        if self.wtw:
-            if hasattr(tracer, 'should_start_context'):
-                tracer.should_start_context = should_start_context
-            if hasattr(tracer, 'switch_context'):
-                tracer.switch_context = self.switch_context
+        if hasattr(tracer, 'should_start_context'):
+            tracer.should_start_context = self.should_start_context
+            tracer.switch_context = self.switch_context
 
         fn = tracer.start()
         self.tracers.append(tracer)
@@ -366,52 +371,59 @@
         return any(tracer.activity() for tracer in self.tracers)
 
     def switch_context(self, new_context):
-        """Who-Tests-What hack: switch to a new who-context."""
-        # Make a new data dict, or find the existing one, and switch all the
-        # tracers to use it.
-        data = self.contexts.setdefault(new_context, {})
-        for tracer in self.tracers:
-            tracer.data = data
+        """Switch to a new dynamic context."""
+        self.flush_data()
+        if self.static_context:
+            context = self.static_context
+            if new_context:
+                context += "|" + new_context
+        else:
+            context = new_context
+        self.covdata.set_context(context)
+
+    def cached_mapped_file(self, filename):
+        """A locally cached version of file names mapped through file_mapper."""
+        key = (type(filename), filename)
+        try:
+            return self.mapped_file_cache[key]
+        except KeyError:
+            return self.mapped_file_cache.setdefault(key, self.file_mapper(filename))
 
-    def save_data(self, covdata):
-        """Save the collected data to a `CoverageData`.
+    def mapped_file_dict(self, d):
+        """Return a dict like d, but with keys modified by file_mapper."""
+        # The call to litems() ensures that the GIL protects the dictionary
+        # iterator against concurrent modifications by tracers running
+        # in other threads. We try three times in case of concurrent
+        # access, hoping to get a clean copy.
+        runtime_err = None
+        for _ in range(3):
+            try:
+                items = litems(d)
+            except RuntimeError as ex:
+                runtime_err = ex
+            else:
+                break
+        else:
+            raise runtime_err
+
+        return dict((self.cached_mapped_file(k), v) for k, v in items if v)
+
+    def flush_data(self):
+        """Save the collected data to our associated `CoverageData`.
+
+        Data may have also been saved along the way. This forces the
+        last of the data to be saved.
 
         Returns True if there was data to save, False if not.
         """
         if not self._activity():
             return False
 
-        def abs_file_dict(d):
-            """Return a dict like d, but with keys modified by `abs_file`."""
-            # The call to litems() ensures that the GIL protects the dictionary
-            # iterator against concurrent modifications by tracers running
-            # in other threads. We try three times in case of concurrent
-            # access, hoping to get a clean copy.
-            runtime_err = None
-            for _ in range(3):
-                try:
-                    items = litems(d)
-                except RuntimeError as ex:
-                    runtime_err = ex
-                else:
-                    break
-            else:
-                raise runtime_err       # pylint: disable=raising-bad-type
-
-            return dict((abs_file(k), v) for k, v in items)
-
         if self.branch:
-            covdata.add_arcs(abs_file_dict(self.data))
+            self.covdata.add_arcs(self.mapped_file_dict(self.data))
         else:
-            covdata.add_lines(abs_file_dict(self.data))
-        covdata.add_file_tracers(abs_file_dict(self.file_tracers))
-
-        if self.wtw:
-            # Just a hack, so just hack it.
-            import pprint
-            out_file = "coverage_wtw_{:06}.py".format(os.getpid())
-            with open(out_file, "w") as wtw_out:
-                pprint.pprint(self.contexts, wtw_out)
+            self.covdata.add_lines(self.mapped_file_dict(self.data))
+        self.covdata.add_file_tracers(self.mapped_file_dict(self.file_tracers))
 
         self._clear_data()
         return True
--- a/eric6/DebugClients/Python/coverage/config.py	Wed Feb 19 19:38:36 2020 +0100
+++ b/eric6/DebugClients/Python/coverage/config.py	Sat Feb 22 14:27:42 2020 +0100
@@ -1,15 +1,20 @@
 # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
 
 """Config file for coverage.py"""
 
 import collections
+import copy
 import os
+import os.path
 import re
-import sys
 
+from coverage import env
 from coverage.backward import configparser, iitems, string_class
 from coverage.misc import contract, CoverageException, isolate_module
+from coverage.misc import substitute_variables
+
+from coverage.tomlconfig import TomlConfigParser, TomlDecodeError
 
 os = isolate_module(os)
 
@@ -30,11 +35,11 @@
         if our_file:
             self.section_prefixes.append("")
 
-    def read(self, filenames):
+    def read(self, filenames, encoding=None):
         """Read a file name as UTF-8 configuration data."""
         kwargs = {}
-        if sys.version_info >= (3, 2):
-            kwargs['encoding'] = "utf-8"
+        if env.PYVERSION >= (3, 2):
+            kwargs['encoding'] = encoding or "utf-8"
         return configparser.RawConfigParser.read(self, filenames, **kwargs)
 
     def has_option(self, section, option):
@@ -85,23 +90,7 @@
             raise configparser.NoOptionError
 
         v = configparser.RawConfigParser.get(self, real_section, option, *args, **kwargs)
-        def dollar_replace(m):
-            """Called for each $replacement."""
-            # Only one of the groups will have matched, just get its text.
-            word = next(w for w in m.groups() if w is not None)     # pragma: part covered
-            if word == "$":
-                return "$"
-            else:
-                return os.environ.get(word, '')
-
-        dollar_pattern = r"""(?x)   # Use extended regex syntax
-            \$(?:                   # A dollar sign, then
-            (?P<v1>\w+) |           #   a plain word,
-            {(?P<v2>\w+)} |         #   or a {-wrapped word,
-            (?P<char>[$])           #   or a dollar sign.
-            )
-            """
-        v = re.sub(dollar_pattern, dollar_replace, v)
+        v = substitute_variables(v, os.environ)
         return v
 
     def getlist(self, section, option):
@@ -172,11 +161,18 @@
     operation of coverage.py.
 
     """
+    # pylint: disable=too-many-instance-attributes
+
     def __init__(self):
         """Initialize the configuration attributes to their defaults."""
         # Metadata about the config.
+        # We tried to read these config files.
         self.attempted_config_files = []
-        self.config_files = []
+        # We did read these config files, but maybe didn't find any content for us.
+        self.config_files_read = []
+        # The file that gave us our configuration.
+        self.config_file = None
+        self._config_contents = None
 
         # Defaults for [run] and [report]
         self._include = None
@@ -184,18 +180,23 @@
 
         # Defaults for [run]
         self.branch = False
+        self.command_line = None
         self.concurrency = None
+        self.context = None
         self.cover_pylib = False
         self.data_file = ".coverage"
         self.debug = []
         self.disable_warnings = []
+        self.dynamic_context = None
         self.note = None
         self.parallel = False
         self.plugins = []
-        self.source = None
+        self.relative_files = False
         self.run_include = None
         self.run_omit = None
+        self.source = None
         self.timid = False
+        self._crash = None
 
         # Defaults for [report]
         self.exclude_list = DEFAULT_EXCLUDE[:]
@@ -206,20 +207,28 @@
         self.partial_always_list = DEFAULT_PARTIAL_ALWAYS[:]
         self.partial_list = DEFAULT_PARTIAL[:]
         self.precision = 0
+        self.report_contexts = None
         self.show_missing = False
         self.skip_covered = False
+        self.skip_empty = False
 
         # Defaults for [html]
         self.extra_css = None
         self.html_dir = "htmlcov"
         self.html_title = "Coverage report"
+        self.show_contexts = False
 
         # Defaults for [xml]
         self.xml_output = "coverage.xml"
         self.xml_package_depth = 99
 
+        # Defaults for [json]
+        self.json_output = "coverage.json"
+        self.json_pretty_print = False
+        self.json_show_contexts = False
+
         # Defaults for [paths]
-        self.paths = {}
+        self.paths = collections.OrderedDict()
 
         # Options for plugins
         self.plugin_options = {}
@@ -252,17 +261,22 @@
         coverage.py settings in it.
 
         """
+        _, ext = os.path.splitext(filename)
+        if ext == '.toml':
+            cp = TomlConfigParser(our_file)
+        else:
+            cp = HandyConfigParser(our_file)
+
         self.attempted_config_files.append(filename)
 
-        cp = HandyConfigParser(our_file)
         try:
             files_read = cp.read(filename)
-        except configparser.Error as err:
+        except (configparser.Error, TomlDecodeError) as err:
             raise CoverageException("Couldn't read config file %s: %s" % (filename, err))
         if not files_read:
             return False
 
-        self.config_files.extend(files_read)
+        self.config_files_read.extend(map(os.path.abspath, files_read))
 
         any_set = False
         try:
@@ -305,9 +319,20 @@
         # then it was used.  If we're piggybacking on someone else's file,
         # then it was only used if we found some settings in it.
         if our_file:
-            return True
+            used = True
         else:
-            return any_set
+            used = any_set
+
+        if used:
+            self.config_file = os.path.abspath(filename)
+            with open(filename) as f:
+                self._config_contents = f.read()
+
+        return used
+
+    def copy(self):
+        """Return a copy of the configuration."""
+        return copy.deepcopy(self)
 
     CONFIG_FILE_OPTIONS = [
         # These are *args for _set_attr_from_config_option:
@@ -320,18 +345,23 @@
 
         # [run]
         ('branch', 'run:branch', 'boolean'),
+        ('command_line', 'run:command_line'),
         ('concurrency', 'run:concurrency', 'list'),
+        ('context', 'run:context'),
         ('cover_pylib', 'run:cover_pylib', 'boolean'),
         ('data_file', 'run:data_file'),
         ('debug', 'run:debug', 'list'),
         ('disable_warnings', 'run:disable_warnings', 'list'),
+        ('dynamic_context', 'run:dynamic_context'),
         ('note', 'run:note'),
         ('parallel', 'run:parallel', 'boolean'),
         ('plugins', 'run:plugins', 'list'),
+        ('relative_files', 'run:relative_files', 'boolean'),
         ('run_include', 'run:include', 'list'),
         ('run_omit', 'run:omit', 'list'),
         ('source', 'run:source', 'list'),
         ('timid', 'run:timid', 'boolean'),
+        ('_crash', 'run:_crash'),
 
         # [report]
         ('exclude_list', 'report:exclude_lines', 'regexlist'),
@@ -340,20 +370,28 @@
         ('partial_always_list', 'report:partial_branches_always', 'regexlist'),
         ('partial_list', 'report:partial_branches', 'regexlist'),
         ('precision', 'report:precision', 'int'),
+        ('report_contexts', 'report:contexts', 'list'),
         ('report_include', 'report:include', 'list'),
         ('report_omit', 'report:omit', 'list'),
         ('show_missing', 'report:show_missing', 'boolean'),
         ('skip_covered', 'report:skip_covered', 'boolean'),
+        ('skip_empty', 'report:skip_empty', 'boolean'),
         ('sort', 'report:sort'),
 
         # [html]
         ('extra_css', 'html:extra_css'),
         ('html_dir', 'html:directory'),
         ('html_title', 'html:title'),
+        ('show_contexts', 'html:show_contexts', 'boolean'),
 
         # [xml]
         ('xml_output', 'xml:output'),
         ('xml_package_depth', 'xml:package_depth', 'int'),
+
+        # [json]
+        ('json_output', 'json:output'),
+        ('json_pretty_print', 'json:pretty_print', 'boolean'),
+        ('json_show_contexts', 'json:show_contexts', 'boolean'),
     ]
 
     def _set_attr_from_config_option(self, cp, attr, where, type_=''):
@@ -425,6 +463,35 @@
         raise CoverageException("No such option: %r" % option_name)
 
 
+def config_files_to_try(config_file):
+    """What config files should we try to read?
+
+    Returns a list of tuples:
+        (filename, is_our_file, was_file_specified)
+    """
+
+    # Some API users were specifying ".coveragerc" to mean the same as
+    # True, so make it so.
+    if config_file == ".coveragerc":
+        config_file = True
+    specified_file = (config_file is not True)
+    if not specified_file:
+        # No file was specified. Check COVERAGE_RCFILE.
+        config_file = os.environ.get('COVERAGE_RCFILE')
+        if config_file:
+            specified_file = True
+    if not specified_file:
+        # Still no file specified. Default to .coveragerc
+        config_file = ".coveragerc"
+    files_to_try = [
+        (config_file, True, specified_file),
+        ("setup.cfg", False, False),
+        ("tox.ini", False, False),
+        ("pyproject.toml", False, False),
+    ]
+    return files_to_try
+
+
 def read_coverage_config(config_file, **kwargs):
     """Read the coverage.py configuration.
 
@@ -435,10 +502,7 @@
             setting values in the configuration.
 
     Returns:
-        config_file, config:
-            config_file is the value to use for config_file in other
-            invocations of coverage.
-
+        config:
             config is a CoverageConfig object read from the appropriate
             configuration file.
 
@@ -449,26 +513,16 @@
 
     # 2) from a file:
     if config_file:
-        # Some API users were specifying ".coveragerc" to mean the same as
-        # True, so make it so.
-        if config_file == ".coveragerc":
-            config_file = True
-        specified_file = (config_file is not True)
-        if not specified_file:
-            config_file = ".coveragerc"
+        files_to_try = config_files_to_try(config_file)
 
-        for fname, our_file in [(config_file, True),
-                                ("setup.cfg", False),
-                                ("tox.ini", False)]:
+        for fname, our_file, specified_file in files_to_try:
             config_read = config.from_file(fname, our_file=our_file)
-            is_config_file = fname == config_file
-
-            if not config_read and is_config_file and specified_file:
+            if config_read:
+                break
+            if specified_file:
                 raise CoverageException("Couldn't read '%s' as a config file" % fname)
 
-            if config_read:
-                break
-
+    # $set_env.py: COVERAGE_DEBUG - Options for --debug.
     # 3) from environment variables:
     env_data_file = os.environ.get('COVERAGE_FILE')
     if env_data_file:
@@ -485,5 +539,9 @@
     config.data_file = os.path.expanduser(config.data_file)
     config.html_dir = os.path.expanduser(config.html_dir)
     config.xml_output = os.path.expanduser(config.xml_output)
+    config.paths = collections.OrderedDict(
+        (k, [os.path.expanduser(f) for f in v])
+        for k, v in config.paths.items()
+    )
 
-    return config_file, config
+    return config
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/eric6/DebugClients/Python/coverage/context.py	Sat Feb 22 14:27:42 2020 +0100
@@ -0,0 +1,91 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
+
+"""Determine contexts for coverage.py"""
+
+
+def combine_context_switchers(context_switchers):
+    """Create a single context switcher from multiple switchers.
+
+    `context_switchers` is a list of functions that take a frame as an
+    argument and return a string to use as the new context label.
+
+    Returns a function that composites `context_switchers` functions, or None
+    if `context_switchers` is an empty list.
+
+    When invoked, the combined switcher calls `context_switchers` one-by-one
+    until a string is returned.  The combined switcher returns None if all
+    `context_switchers` return None.
+    """
+    if not context_switchers:
+        return None
+
+    if len(context_switchers) == 1:
+        return context_switchers[0]
+
+    def should_start_context(frame):
+        """The combiner for multiple context switchers."""
+        for switcher in context_switchers:
+            new_context = switcher(frame)
+            if new_context is not None:
+                return new_context
+        return None
+
+    return should_start_context
+
+
+def should_start_context_test_function(frame):
+    """Is this frame calling a test_* function?"""
+    co_name = frame.f_code.co_name
+    if co_name.startswith("test") or co_name == "runTest":
+        return qualname_from_frame(frame)
+    return None
+
+
+def qualname_from_frame(frame):
+    """Get a qualified name for the code running in `frame`."""
+    co = frame.f_code
+    fname = co.co_name
+    method = None
+    if co.co_argcount and co.co_varnames[0] == "self":
+        self = frame.f_locals["self"]
+        method = getattr(self, fname, None)
+
+    if method is None:
+        func = frame.f_globals.get(fname)
+        if func is None:
+            return None
+        return func.__module__ + '.' + fname
+
+    func = getattr(method, '__func__', None)
+    if func is None:
+        cls = self.__class__
+        return cls.__module__ + '.' + cls.__name__ + "." + fname
+
+    if hasattr(func, '__qualname__'):
+        qname = func.__module__ + '.' + func.__qualname__
+    else:
+        for cls in getattr(self.__class__, '__mro__', ()):
+            f = cls.__dict__.get(fname, None)
+            if f is None:
+                continue
+            if f is func:
+                qname = cls.__module__ + '.' + cls.__name__ + "." + fname
+                break
+        else:
+            # Support for old-style classes.
+            def mro(bases):
+                for base in bases:
+                    f = base.__dict__.get(fname, None)
+                    if f is func:
+                        return base.__module__ + '.' + base.__name__ + "." + fname
+                for base in bases:
+                    qname = mro(base.__bases__)
+                    if qname is not None:
+                        return qname
+                return None
+            qname = mro([self.__class__])
+            if qname is None:
+                qname = func.__module__ + '.' + fname
+
+    return qname
--- a/eric6/DebugClients/Python/coverage/control.py	Wed Feb 19 19:38:36 2020 +0100
+++ b/eric6/DebugClients/Python/coverage/control.py	Sat Feb 22 14:27:42 2020 +0100
@@ -1,36 +1,35 @@
 # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
 
 """Core control stuff for coverage.py."""
 
-
 import atexit
-import inspect
-import itertools
+import contextlib
 import os
+import os.path
 import platform
-import re
 import sys
 import time
-import traceback
 
 from coverage import env
 from coverage.annotate import AnnotateReporter
 from coverage.backward import string_class, iitems
-from coverage.collector import Collector
+from coverage.collector import Collector, CTracer
 from coverage.config import read_coverage_config
-from coverage.data import CoverageData, CoverageDataFiles
-from coverage.debug import DebugControl, write_formatted_info
-from coverage.files import TreeMatcher, FnmatchMatcher
-from coverage.files import PathAliases, find_python_files, prep_patterns
-from coverage.files import canonical_filename, set_relative_directory
-from coverage.files import ModuleMatcher, abs_file
+from coverage.context import should_start_context_test_function, combine_context_switchers
+from coverage.data import CoverageData, combine_parallel_data
+from coverage.debug import DebugControl, short_stack, write_formatted_info
+from coverage.disposition import disposition_debug_msg
+from coverage.files import PathAliases, abs_file, relative_filename, set_relative_directory
 from coverage.html import HtmlReporter
+from coverage.inorout import InOrOut
+from coverage.jsonreport import JsonReporter
 from coverage.misc import CoverageException, bool_or_none, join_regex
-from coverage.misc import file_be_gone, isolate_module
+from coverage.misc import DefaultValue, ensure_dir_for_file, isolate_module
 from coverage.plugin import FileReporter
 from coverage.plugin_support import Plugins
-from coverage.python import PythonFileReporter, source_for_file
+from coverage.python import PythonFileReporter
+from coverage.report import render_report
 from coverage.results import Analysis, Numbers
 from coverage.summary import SummaryReporter
 from coverage.xmlreport import XmlReporter
@@ -43,22 +42,23 @@
 
 os = isolate_module(os)
 
-# Pypy has some unusual stuff in the "stdlib".  Consider those locations
-# when deciding where the stdlib is.  These modules are not used for anything,
-# they are modules importable from the pypy lib directories, so that we can
-# find those directories.
-_structseq = _pypy_irc_topic = None
-if env.PYPY:
+@contextlib.contextmanager
+def override_config(cov, **kwargs):
+    """Temporarily tweak the configuration of `cov`.
+
+    The arguments are applied to `cov.config` with the `from_args` method.
+    At the end of the with-statement, the old configuration is restored.
+    """
+    original_config = cov.config
+    cov.config = cov.config.copy()
     try:
-        import _structseq
-    except ImportError:
-        pass
+        cov.config.from_args(**kwargs)
+        yield
+    finally:
+        cov.config = original_config
 
-    try:
-        import _pypy_irc_topic
-    except ImportError:
-        pass
 
+_DEFAULT_DATAFILE = DefaultValue("MISSING")
 
 class Coverage(object):
     """Programmatic access to coverage.py.
@@ -73,18 +73,45 @@
         cov.stop()
         cov.html_report(directory='covhtml')
 
+    Note: in keeping with Python custom, names starting with underscore are
+    not part of the public API. They might stop working at any point.  Please
+    limit yourself to documented methods to avoid problems.
+
     """
+
+    # The stack of started Coverage instances.
+    _instances = []
+
+    @classmethod
+    def current(cls):
+        """Get the latest started `Coverage` instance, if any.
+
+        Returns: a `Coverage` instance, or None.
+
+        .. versionadded:: 5.0
+
+        """
+        if cls._instances:
+            return cls._instances[-1]
+        else:
+            return None
+
     def __init__(
-        self, data_file=None, data_suffix=None, cover_pylib=None,
+        self, data_file=_DEFAULT_DATAFILE, data_suffix=None, cover_pylib=None,
         auto_data=False, timid=None, branch=None, config_file=True,
         source=None, omit=None, include=None, debug=None,
-        concurrency=None,
+        concurrency=None, check_preimported=False, context=None,
     ):
         """
-        `data_file` is the base name of the data file to use, defaulting to
-        ".coverage".  `data_suffix` is appended (with a dot) to `data_file` to
-        create the final file name.  If `data_suffix` is simply True, then a
-        suffix is created with the machine and process identity included.
+        Many of these arguments duplicate and override values that can be
+        provided in a configuration file.  Parameters that are missing here
+        will use values from the config file.
+
+        `data_file` is the base name of the data file to use. The config value
+        defaults to ".coverage".  None can be provided to prevent writing a data
+        file.  `data_suffix` is appended (with a dot) to `data_file` to create
+        the final file name.  If `data_suffix` is simply True, then a suffix is
+        created with the machine and process identity included.
 
         `cover_pylib` is a boolean determining whether Python code installed
         with the Python interpreter is measured.  This includes the Python
@@ -132,58 +159,73 @@
         "eventlet", "gevent", "multiprocessing", or "thread" (the default).
         This can also be a list of these strings.
 
+        If `check_preimported` is true, then when coverage is started, the
+        already-imported files will be checked to see if they should be
+        measured by coverage.  Importing measured files before coverage is
+        started can mean that code is missed.
+
+        `context` is a string to use as the :ref:`static context
+        <static_contexts>` label for collected data.
+
         .. versionadded:: 4.0
             The `concurrency` parameter.
 
         .. versionadded:: 4.2
             The `concurrency` parameter can now be a list of strings.
 
+        .. versionadded:: 5.0
+            The `check_preimported` and `context` parameters.
+
         """
+        # data_file=None means no disk file at all. data_file missing means
+        # use the value from the config file.
+        self._no_disk = data_file is None
+        if data_file is _DEFAULT_DATAFILE:
+            data_file = None
+
         # Build our configuration from a number of sources.
-        self.config_file, self.config = read_coverage_config(
+        self.config = read_coverage_config(
             config_file=config_file,
             data_file=data_file, cover_pylib=cover_pylib, timid=timid,
             branch=branch, parallel=bool_or_none(data_suffix),
             source=source, run_omit=omit, run_include=include, debug=debug,
             report_omit=omit, report_include=include,
-            concurrency=concurrency,
+            concurrency=concurrency, context=context,
             )
 
         # This is injectable by tests.
         self._debug_file = None
 
         self._auto_load = self._auto_save = auto_data
-        self._data_suffix = data_suffix
-
-        # The matchers for _should_trace.
-        self.source_match = None
-        self.source_pkgs_match = None
-        self.pylib_match = self.cover_match = None
-        self.include_match = self.omit_match = None
+        self._data_suffix_specified = data_suffix
 
         # Is it ok for no data to be collected?
         self._warn_no_data = True
         self._warn_unimported_source = True
+        self._warn_preimported_source = check_preimported
+        self._no_warn_slugs = None
 
         # A record of all the warnings that have been issued.
         self._warnings = []
 
         # Other instance attributes, set later.
-        self.omit = self.include = self.source = None
-        self.source_pkgs_unmatched = None
-        self.source_pkgs = None
-        self.data = self.data_files = self.collector = None
-        self.plugins = None
-        self.pylib_paths = self.cover_paths = None
-        self.data_suffix = self.run_suffix = None
+        self._data = self._collector = None
+        self._plugins = None
+        self._inorout = None
+        self._inorout_class = InOrOut
+        self._data_suffix = self._run_suffix = None
         self._exclude_re = None
-        self.debug = None
+        self._debug = None
+        self._file_mapper = None
 
         # State machine variables:
         # Have we initialized everything?
         self._inited = False
+        self._inited_for_start = False
         # Have we started collecting and not stopped it?
         self._started = False
+        # Should we write the debug output?
+        self._should_write_debug = True
 
         # If we have sub-process measurement happening automatically, then we
         # want any explicit creation of a Coverage object to mean, this process
@@ -209,378 +251,61 @@
         # Create and configure the debugging controller. COVERAGE_DEBUG_FILE
         # is an environment variable, the name of a file to append debug logs
         # to.
-        if self._debug_file is None:
-            debug_file_name = os.environ.get("COVERAGE_DEBUG_FILE")
-            if debug_file_name:
-                self._debug_file = open(debug_file_name, "a")
-            else:
-                self._debug_file = sys.stderr
-        self.debug = DebugControl(self.config.debug, self._debug_file)
+        self._debug = DebugControl(self.config.debug, self._debug_file)
+
+        if "multiprocessing" in (self.config.concurrency or ()):
+            # Multi-processing uses parallel for the subprocesses, so also use
+            # it for the main process.
+            self.config.parallel = True
 
         # _exclude_re is a dict that maps exclusion list names to compiled regexes.
         self._exclude_re = {}
 
         set_relative_directory()
+        self._file_mapper = relative_filename if self.config.relative_files else abs_file
 
         # Load plugins
-        self.plugins = Plugins.load_plugins(self.config.plugins, self.config, self.debug)
+        self._plugins = Plugins.load_plugins(self.config.plugins, self.config, self._debug)
 
         # Run configuring plugins.
-        for plugin in self.plugins.configurers:
+        for plugin in self._plugins.configurers:
             # We need an object with set_option and get_option. Either self or
             # self.config will do. Choosing randomly stops people from doing
             # other things with those objects, against the public API.  Yes,
             # this is a bit childish. :)
             plugin.configure([self, self.config][int(time.time()) % 2])
 
-        # The source argument can be directories or package names.
-        self.source = []
-        self.source_pkgs = []
-        for src in self.config.source or []:
-            if os.path.isdir(src):
-                self.source.append(canonical_filename(src))
-            else:
-                self.source_pkgs.append(src)
-        self.source_pkgs_unmatched = self.source_pkgs[:]
-
-        self.omit = prep_patterns(self.config.run_omit)
-        self.include = prep_patterns(self.config.run_include)
-
-        concurrency = self.config.concurrency or []
-        if "multiprocessing" in concurrency:
-            if not patch_multiprocessing:
-                raise CoverageException(                    # pragma: only jython
-                    "multiprocessing is not supported on this Python"
-                )
-            patch_multiprocessing(rcfile=self.config_file)
-            # Multi-processing uses parallel for the subprocesses, so also use
-            # it for the main process.
-            self.config.parallel = True
-
-        self.collector = Collector(
-            should_trace=self._should_trace,
-            check_include=self._check_include_omit_etc,
-            timid=self.config.timid,
-            branch=self.config.branch,
-            warn=self._warn,
-            concurrency=concurrency,
-            )
-
-        # Early warning if we aren't going to be able to support plugins.
-        if self.plugins.file_tracers and not self.collector.supports_plugins:
-            self._warn(
-                "Plugin file tracers (%s) aren't supported with %s" % (
-                    ", ".join(
-                        plugin._coverage_plugin_name
-                            for plugin in self.plugins.file_tracers
-                        ),
-                    self.collector.tracer_name(),
-                    )
-                )
-            for plugin in self.plugins.file_tracers:
-                plugin._coverage_enabled = False
-
-        # Suffixes are a bit tricky.  We want to use the data suffix only when
-        # collecting data, not when combining data.  So we save it as
-        # `self.run_suffix` now, and promote it to `self.data_suffix` if we
-        # find that we are collecting data later.
-        if self._data_suffix or self.config.parallel:
-            if not isinstance(self._data_suffix, string_class):
-                # if data_suffix=True, use .machinename.pid.random
-                self._data_suffix = True
-        else:
-            self._data_suffix = None
-        self.data_suffix = None
-        self.run_suffix = self._data_suffix
+    def _post_init(self):
+        """Stuff to do after everything is initialized."""
+        if self._should_write_debug:
+            self._should_write_debug = False
+            self._write_startup_debug()
 
-        # Create the data file.  We do this at construction time so that the
-        # data file will be written into the directory where the process
-        # started rather than wherever the process eventually chdir'd to.
-        self.data = CoverageData(debug=self.debug)
-        self.data_files = CoverageDataFiles(
-            basename=self.config.data_file, warn=self._warn, debug=self.debug,
-        )
-
-        # The directories for files considered "installed with the interpreter".
-        self.pylib_paths = set()
-        if not self.config.cover_pylib:
-            # Look at where some standard modules are located. That's the
-            # indication for "installed with the interpreter". In some
-            # environments (virtualenv, for example), these modules may be
-            # spread across a few locations. Look at all the candidate modules
-            # we've imported, and take all the different ones.
-            for m in (atexit, inspect, os, platform, _pypy_irc_topic, re, _structseq, traceback):
-                if m is not None and hasattr(m, "__file__"):
-                    self.pylib_paths.add(self._canonical_path(m, directory=True))
-
-            if _structseq and not hasattr(_structseq, '__file__'):
-                # PyPy 2.4 has no __file__ in the builtin modules, but the code
-                # objects still have the file names.  So dig into one to find
-                # the path to exclude.
-                structseq_new = _structseq.structseq_new
-                try:
-                    structseq_file = structseq_new.func_code.co_filename
-                except AttributeError:
-                    structseq_file = structseq_new.__code__.co_filename
-                self.pylib_paths.add(self._canonical_path(structseq_file))
-
-        # To avoid tracing the coverage.py code itself, we skip anything
-        # located where we are.
-        self.cover_paths = [self._canonical_path(__file__, directory=True)]
-        if env.TESTING:
-            # Don't include our own test code.
-            self.cover_paths.append(os.path.join(self.cover_paths[0], "tests"))
-
-            # When testing, we use PyContracts, which should be considered
-            # part of coverage.py, and it uses six. Exclude those directories
-            # just as we exclude ourselves.
-            import contracts
-            import six
-            for mod in [contracts, six]:
-                self.cover_paths.append(self._canonical_path(mod))
-
-        # Set the reporting precision.
-        Numbers.set_precision(self.config.precision)
-
-        atexit.register(self._atexit)
-
-        # Create the matchers we need for _should_trace
-        if self.source or self.source_pkgs:
-            self.source_match = TreeMatcher(self.source)
-            self.source_pkgs_match = ModuleMatcher(self.source_pkgs)
-        else:
-            if self.cover_paths:
-                self.cover_match = TreeMatcher(self.cover_paths)
-            if self.pylib_paths:
-                self.pylib_match = TreeMatcher(self.pylib_paths)
-        if self.include:
-            self.include_match = FnmatchMatcher(self.include)
-        if self.omit:
-            self.omit_match = FnmatchMatcher(self.omit)
-
-        # The user may want to debug things, show info if desired.
-        self._write_startup_debug()
+        # '[run] _crash' will raise an exception if the value is close by in
+        # the call stack, for testing error handling.
+        if self.config._crash and self.config._crash in short_stack(limit=4):
+            raise Exception("Crashing because called by {}".format(self.config._crash))
 
     def _write_startup_debug(self):
         """Write out debug info at startup if needed."""
         wrote_any = False
-        with self.debug.without_callers():
-            if self.debug.should('config'):
+        with self._debug.without_callers():
+            if self._debug.should('config'):
                 config_info = sorted(self.config.__dict__.items())
-                write_formatted_info(self.debug, "config", config_info)
+                config_info = [(k, v) for k, v in config_info if not k.startswith('_')]
+                write_formatted_info(self._debug, "config", config_info)
                 wrote_any = True
 
-            if self.debug.should('sys'):
-                write_formatted_info(self.debug, "sys", self.sys_info())
-                for plugin in self.plugins:
+            if self._debug.should('sys'):
+                write_formatted_info(self._debug, "sys", self.sys_info())
+                for plugin in self._plugins:
                     header = "sys: " + plugin._coverage_plugin_name
                     info = plugin.sys_info()
-                    write_formatted_info(self.debug, header, info)
+                    write_formatted_info(self._debug, header, info)
                 wrote_any = True
 
         if wrote_any:
-            write_formatted_info(self.debug, "end", ())
-
-    def _canonical_path(self, morf, directory=False):
-        """Return the canonical path of the module or file `morf`.
-
-        If the module is a package, then return its directory. If it is a
-        module, then return its file, unless `directory` is True, in which
-        case return its enclosing directory.
-
-        """
-        morf_path = PythonFileReporter(morf, self).filename
-        if morf_path.endswith("__init__.py") or directory:
-            morf_path = os.path.split(morf_path)[0]
-        return morf_path
-
-    def _name_for_module(self, module_globals, filename):
-        """Get the name of the module for a set of globals and file name.
-
-        For configurability's sake, we allow __main__ modules to be matched by
-        their importable name.
-
-        If loaded via runpy (aka -m), we can usually recover the "original"
-        full dotted module name, otherwise, we resort to interpreting the
-        file name to get the module's name.  In the case that the module name
-        can't be determined, None is returned.
-
-        """
-        if module_globals is None:          # pragma: only ironpython
-            # IronPython doesn't provide globals: https://github.com/IronLanguages/main/issues/1296
-            module_globals = {}
-
-        dunder_name = module_globals.get('__name__', None)
-
-        if isinstance(dunder_name, str) and dunder_name != '__main__':
-            # This is the usual case: an imported module.
-            return dunder_name
-
-        loader = module_globals.get('__loader__', None)
-        for attrname in ('fullname', 'name'):   # attribute renamed in py3.2
-            if hasattr(loader, attrname):
-                fullname = getattr(loader, attrname)
-            else:
-                continue
-
-            if isinstance(fullname, str) and fullname != '__main__':
-                # Module loaded via: runpy -m
-                return fullname
-
-        # Script as first argument to Python command line.
-        inspectedname = inspect.getmodulename(filename)
-        if inspectedname is not None:
-            return inspectedname
-        else:
-            return dunder_name
-
-    def _should_trace_internal(self, filename, frame):
-        """Decide whether to trace execution in `filename`, with a reason.
-
-        This function is called from the trace function.  As each new file name
-        is encountered, this function determines whether it is traced or not.
-
-        Returns a FileDisposition object.
-
-        """
-        original_filename = filename
-        disp = _disposition_init(self.collector.file_disposition_class, filename)
-
-        def nope(disp, reason):
-            """Simple helper to make it easy to return NO."""
-            disp.trace = False
-            disp.reason = reason
-            return disp
-
-        # Compiled Python files have two file names: frame.f_code.co_filename is
-        # the file name at the time the .pyc was compiled.  The second name is
-        # __file__, which is where the .pyc was actually loaded from.  Since
-        # .pyc files can be moved after compilation (for example, by being
-        # installed), we look for __file__ in the frame and prefer it to the
-        # co_filename value.
-        dunder_file = frame.f_globals and frame.f_globals.get('__file__')
-        if dunder_file:
-            filename = source_for_file(dunder_file)
-            if original_filename and not original_filename.startswith('<'):
-                orig = os.path.basename(original_filename)
-                if orig != os.path.basename(filename):
-                    # Files shouldn't be renamed when moved. This happens when
-                    # exec'ing code.  If it seems like something is wrong with
-                    # the frame's file name, then just use the original.
-                    filename = original_filename
-
-        if not filename:
-            # Empty string is pretty useless.
-            return nope(disp, "empty string isn't a file name")
-
-        if filename.startswith('memory:'):
-            return nope(disp, "memory isn't traceable")
-
-        if filename.startswith('<'):
-            # Lots of non-file execution is represented with artificial
-            # file names like "<string>", "<doctest readme.txt[0]>", or
-            # "<exec_function>".  Don't ever trace these executions, since we
-            # can't do anything with the data later anyway.
-            return nope(disp, "not a real file name")
-
-        # pyexpat does a dumb thing, calling the trace function explicitly from
-        # C code with a C file name.
-        if re.search(r"[/\\]Modules[/\\]pyexpat.c", filename):
-            return nope(disp, "pyexpat lies about itself")
-
-        # Jython reports the .class file to the tracer, use the source file.
-        if filename.endswith("$py.class"):
-            filename = filename[:-9] + ".py"
-
-        canonical = canonical_filename(filename)
-        disp.canonical_filename = canonical
-
-        # Try the plugins, see if they have an opinion about the file.
-        plugin = None
-        for plugin in self.plugins.file_tracers:
-            if not plugin._coverage_enabled:
-                continue
-
-            try:
-                file_tracer = plugin.file_tracer(canonical)
-                if file_tracer is not None:
-                    file_tracer._coverage_plugin = plugin
-                    disp.trace = True
-                    disp.file_tracer = file_tracer
-                    if file_tracer.has_dynamic_source_filename():
-                        disp.has_dynamic_filename = True
-                    else:
-                        disp.source_filename = canonical_filename(
-                            file_tracer.source_filename()
-                        )
-                    break
-            except Exception:
-                self._warn(
-                    "Disabling plug-in %r due to an exception:" % (
-                        plugin._coverage_plugin_name
-                    )
-                )
-                traceback.print_exc()
-                plugin._coverage_enabled = False
-                continue
-        else:
-            # No plugin wanted it: it's Python.
-            disp.trace = True
-            disp.source_filename = canonical
-
-        if not disp.has_dynamic_filename:
-            if not disp.source_filename:
-                raise CoverageException(
-                    "Plugin %r didn't set source_filename for %r" %
-                    (plugin, disp.original_filename)
-                )
-            reason = self._check_include_omit_etc_internal(
-                disp.source_filename, frame,
-            )
-            if reason:
-                nope(disp, reason)
-
-        return disp
-
-    def _check_include_omit_etc_internal(self, filename, frame):
-        """Check a file name against the include, omit, etc, rules.
-
-        Returns a string or None.  String means, don't trace, and is the reason
-        why.  None means no reason found to not trace.
-
-        """
-        modulename = self._name_for_module(frame.f_globals, filename)
-
-        # If the user specified source or include, then that's authoritative
-        # about the outer bound of what to measure and we don't have to apply
-        # any canned exclusions. If they didn't, then we have to exclude the
-        # stdlib and coverage.py directories.
-        if self.source_match:
-            if self.source_pkgs_match.match(modulename):
-                if modulename in self.source_pkgs_unmatched:
-                    self.source_pkgs_unmatched.remove(modulename)
-            elif not self.source_match.match(filename):
-                return "falls outside the --source trees"
-        elif self.include_match:
-            if not self.include_match.match(filename):
-                return "falls outside the --include trees"
-        else:
-            # If we aren't supposed to trace installed code, then check if this
-            # is near the Python standard library and skip it if so.
-            if self.pylib_match and self.pylib_match.match(filename):
-                return "is in the stdlib"
-
-            # We exclude the coverage.py code itself, since a little of it
-            # will be measured otherwise.
-            if self.cover_match and self.cover_match.match(filename):
-                return "is part of coverage.py"
-
-        # Check the file against the omit pattern.
-        if self.omit_match and self.omit_match.match(filename):
-            return "is inside an --omit pattern"
-
-        # No reason found to skip this file.
-        return None
+            write_formatted_info(self._debug, "end", ())
 
     def _should_trace(self, filename, frame):
         """Decide whether to trace execution in `filename`.
@@ -588,9 +313,9 @@
         Calls `_should_trace_internal`, and returns the FileDisposition.
 
         """
-        disp = self._should_trace_internal(filename, frame)
-        if self.debug.should('trace'):
-            self.debug.write(_disposition_debug_msg(disp))
+        disp = self._inorout.should_trace(filename, frame)
+        if self._debug.should('trace'):
+            self._debug.write(disposition_debug_msg(disp))
         return disp
 
     def _check_include_omit_etc(self, filename, frame):
@@ -599,32 +324,42 @@
         Returns a boolean: True if the file should be traced, False if not.
 
         """
-        reason = self._check_include_omit_etc_internal(filename, frame)
-        if self.debug.should('trace'):
+        reason = self._inorout.check_include_omit_etc(filename, frame)
+        if self._debug.should('trace'):
             if not reason:
                 msg = "Including %r" % (filename,)
             else:
                 msg = "Not including %r: %s" % (filename, reason)
-            self.debug.write(msg)
+            self._debug.write(msg)
 
         return not reason
 
-    def _warn(self, msg, slug=None):
+    def _warn(self, msg, slug=None, once=False):
         """Use `msg` as a warning.
 
         For warning suppression, use `slug` as the shorthand.
+
+        If `once` is true, only show this warning once (determined by the
+        slug.)
+
         """
-        if slug in self.config.disable_warnings:
+        if self._no_warn_slugs is None:
+            self._no_warn_slugs = list(self.config.disable_warnings)
+
+        if slug in self._no_warn_slugs:
             # Don't issue the warning
             return
 
         self._warnings.append(msg)
         if slug:
             msg = "%s (%s)" % (msg, slug)
-        if self.debug.should('pid'):
+        if self._debug.should('pid'):
             msg = "[%d] %s" % (os.getpid(), msg)
         sys.stderr.write("Coverage.py warning: %s\n" % msg)
 
+        if once:
+            self._no_warn_slugs.append(slug)
+
     def get_option(self, option_name):
         """Get an option from the configuration.
 
@@ -664,17 +399,107 @@
         """
         self.config.set_option(option_name, value)
 
-    def use_cache(self, usecache):
-        """Obsolete method."""
-        self._init()
-        if not usecache:
-            self._warn("use_cache(False) is no longer supported.")
-
     def load(self):
         """Load previously-collected coverage data from the data file."""
         self._init()
-        self.collector.reset()
-        self.data_files.read(self.data)
+        if self._collector:
+            self._collector.reset()
+        should_skip = self.config.parallel and not os.path.exists(self.config.data_file)
+        if not should_skip:
+            self._init_data(suffix=None)
+        self._post_init()
+        if not should_skip:
+            self._data.read()
+
+    def _init_for_start(self):
+        """Initialization for start()"""
+        # Construct the collector.
+        concurrency = self.config.concurrency or ()
+        if "multiprocessing" in concurrency:
+            if not patch_multiprocessing:
+                raise CoverageException(                    # pragma: only jython
+                    "multiprocessing is not supported on this Python"
+                )
+            patch_multiprocessing(rcfile=self.config.config_file)
+
+        dycon = self.config.dynamic_context
+        if not dycon or dycon == "none":
+            context_switchers = []
+        elif dycon == "test_function":
+            context_switchers = [should_start_context_test_function]
+        else:
+            raise CoverageException(
+                "Don't understand dynamic_context setting: {!r}".format(dycon)
+            )
+
+        context_switchers.extend(
+            plugin.dynamic_context for plugin in self._plugins.context_switchers
+        )
+
+        should_start_context = combine_context_switchers(context_switchers)
+
+        self._collector = Collector(
+            should_trace=self._should_trace,
+            check_include=self._check_include_omit_etc,
+            should_start_context=should_start_context,
+            file_mapper=self._file_mapper,
+            timid=self.config.timid,
+            branch=self.config.branch,
+            warn=self._warn,
+            concurrency=concurrency,
+            )
+
+        suffix = self._data_suffix_specified
+        if suffix or self.config.parallel:
+            if not isinstance(suffix, string_class):
+                # if data_suffix=True, use .machinename.pid.random
+                suffix = True
+        else:
+            suffix = None
+
+        self._init_data(suffix)
+
+        self._collector.use_data(self._data, self.config.context)
+
+        # Early warning if we aren't going to be able to support plugins.
+        if self._plugins.file_tracers and not self._collector.supports_plugins:
+            self._warn(
+                "Plugin file tracers (%s) aren't supported with %s" % (
+                    ", ".join(
+                        plugin._coverage_plugin_name
+                            for plugin in self._plugins.file_tracers
+                        ),
+                    self._collector.tracer_name(),
+                    )
+                )
+            for plugin in self._plugins.file_tracers:
+                plugin._coverage_enabled = False
+
+        # Create the file classifying substructure.
+        self._inorout = self._inorout_class(warn=self._warn)
+        self._inorout.configure(self.config)
+        self._inorout.plugins = self._plugins
+        self._inorout.disp_class = self._collector.file_disposition_class
+
+        # It's useful to write debug info after initing for start.
+        self._should_write_debug = True
+
+        atexit.register(self._atexit)
+
+    def _init_data(self, suffix):
+        """Create a data file if we don't have one yet."""
+        if self._data is None:
+            # Create the data file.  We do this at construction time so that the
+            # data file will be written into the directory where the process
+            # started rather than wherever the process eventually chdir'd to.
+            ensure_dir_for_file(self.config.data_file)
+            self._data = CoverageData(
+                basename=self.config.data_file,
+                suffix=suffix,
+                warn=self._warn,
+                debug=self._debug,
+                no_disk=self._no_disk,
+            )
 
     def start(self):
         """Start measuring code coverage.
@@ -688,45 +513,82 @@
 
         """
         self._init()
-        if self.include:
-            if self.source or self.source_pkgs:
-                self._warn("--include is ignored because --source is set", slug="include-ignored")
-        if self.run_suffix:
-            # Calling start() means we're running code, so use the run_suffix
-            # as the data_suffix when we eventually save the data.
-            self.data_suffix = self.run_suffix
+        if not self._inited_for_start:
+            self._inited_for_start = True
+            self._init_for_start()
+        self._post_init()
+
+        # Issue warnings for possible problems.
+        self._inorout.warn_conflicting_settings()
+
+        # See if we think some code that would eventually be measured has
+        # already been imported.
+        if self._warn_preimported_source:
+            self._inorout.warn_already_imported_files()
+
         if self._auto_load:
             self.load()
 
-        self.collector.start()
+        self._collector.start()
         self._started = True
+        self._instances.append(self)
 
     def stop(self):
         """Stop measuring code coverage."""
+        if self._instances:
+            if self._instances[-1] is self:
+                self._instances.pop()
         if self._started:
-            self.collector.stop()
+            self._collector.stop()
         self._started = False
 
     def _atexit(self):
         """Clean up on process shutdown."""
-        if self.debug.should("process"):
-            self.debug.write("atexit: {0!r}".format(self))
+        if self._debug.should("process"):
+            self._debug.write("atexit: {!r}".format(self))
         if self._started:
             self.stop()
         if self._auto_save:
             self.save()
 
     def erase(self):
-        """Erase previously-collected coverage data.
+        """Erase previously collected coverage data.
 
         This removes the in-memory data collected in this session as well as
         discarding the data file.
 
         """
         self._init()
-        self.collector.reset()
-        self.data.erase()
-        self.data_files.erase(parallel=self.config.parallel)
+        self._post_init()
+        if self._collector:
+            self._collector.reset()
+        self._init_data(suffix=None)
+        self._data.erase(parallel=self.config.parallel)
+        self._data = None
+        self._inited_for_start = False
+
+    def switch_context(self, new_context):
+        """Switch to a new dynamic context.
+
+        `new_context` is a string to use as the :ref:`dynamic context
+        <dynamic_contexts>` label for collected data.  If a :ref:`static
+        context <static_contexts>` is in use, the static and dynamic context
+        labels will be joined together with a pipe character.
+
+        Coverage collection must be started already.
+
+        .. versionadded:: 5.0
+
+        """
+        if not self._started:                           # pragma: part started
+            raise CoverageException(
+                "Cannot switch context, coverage is not started"
+                )
+
+        if self._collector.should_start_context:
+            self._warn("Conflicting dynamic contexts", slug="dynamic-conflict", once=True)
+
+        self._collector.switch_context(new_context)
 
     def clear_exclude(self, which='exclude'):
         """Clear the exclude list."""
@@ -777,9 +639,8 @@
 
     def save(self):
         """Save the collected coverage data to the data file."""
-        self._init()
-        self.get_data()
-        self.data_files.write(self.data, suffix=self.data_suffix)
+        data = self.get_data()
+        data.write()
 
     def combine(self, data_paths=None, strict=False):
         """Combine together a number of similarly-named coverage data files.
@@ -804,6 +665,8 @@
 
         """
         self._init()
+        self._init_data(suffix=None)
+        self._post_init()
         self.get_data()
 
         aliases = None
@@ -814,9 +677,7 @@
                 for pattern in paths[1:]:
                     aliases.add(pattern, result)
 
-        self.data_files.combine_parallel_data(
-            self.data, aliases=aliases, data_paths=data_paths, strict=strict,
-        )
+        combine_parallel_data(self._data, aliases=aliases, data_paths=data_paths, strict=strict)
 
     def get_data(self):
         """Get the collected data.
@@ -829,11 +690,13 @@
 
         """
         self._init()
+        self._init_data(suffix=None)
+        self._post_init()
 
-        if self.collector.save_data(self.data):
+        if self._collector and self._collector.flush_data():
             self._post_save_work()
 
-        return self.data
+        return self._data
 
     def _post_save_work(self):
         """After saving data, look for warnings, post-work, etc.
@@ -845,78 +708,21 @@
         # If there are still entries in the source_pkgs_unmatched list,
         # then we never encountered those packages.
         if self._warn_unimported_source:
-            for pkg in self.source_pkgs_unmatched:
-                self._warn_about_unmeasured_code(pkg)
+            self._inorout.warn_unimported_source()
 
         # Find out if we got any data.
-        if not self.data and self._warn_no_data:
+        if not self._data and self._warn_no_data:
             self._warn("No data was collected.", slug="no-data-collected")
 
-        # Find files that were never executed at all.
-        for pkg in self.source_pkgs:
-            if (not pkg in sys.modules or
-                not module_has_file(sys.modules[pkg])):
-                continue
-            pkg_file = source_for_file(sys.modules[pkg].__file__)
-            self._find_unexecuted_files(self._canonical_path(pkg_file))
-
-        for src in self.source:
-            self._find_unexecuted_files(src)
+        # Touch all the files that could have executed, so that we can
+        # mark completely unexecuted files as 0% covered.
+        if self._data is not None:
+            for file_path, plugin_name in self._inorout.find_possibly_unexecuted_files():
+                file_path = self._file_mapper(file_path)
+                self._data.touch_file(file_path, plugin_name)
 
         if self.config.note:
-            self.data.add_run_info(note=self.config.note)
-
-    def _warn_about_unmeasured_code(self, pkg):
-        """Warn about a package or module that we never traced.
-
-        `pkg` is a string, the name of the package or module.
-
-        """
-        mod = sys.modules.get(pkg)
-        if mod is None:
-            self._warn("Module %s was never imported." % pkg, slug="module-not-imported")
-            return
-
-        if module_is_namespace(mod):
-            # A namespace package. It's OK for this not to have been traced,
-            # since there is no code directly in it.
-            return
-
-        if not module_has_file(mod):
-            self._warn("Module %s has no Python source." % pkg, slug="module-not-python")
-            return
-
-        # The module was in sys.modules, and seems like a module with code, but
-        # we never measured it. I guess that means it was imported before
-        # coverage even started.
-        self._warn(
-            "Module %s was previously imported, but not measured" % pkg,
-            slug="module-not-measured",
-        )
-
-    def _find_plugin_files(self, src_dir):
-        """Get executable files from the plugins."""
-        for plugin in self.plugins.file_tracers:
-            for x_file in plugin.find_executable_files(src_dir):
-                yield x_file, plugin._coverage_plugin_name
-
-    def _find_unexecuted_files(self, src_dir):
-        """Find unexecuted files in `src_dir`.
-
-        Search for files in `src_dir` that are probably importable,
-        and add them as unexecuted files in `self.data`.
-
-        """
-        py_files = ((py_file, None) for py_file in find_python_files(src_dir))
-        plugin_files = self._find_plugin_files(src_dir)
-
-        for file_path, plugin_name in itertools.chain(py_files, plugin_files):
-            file_path = canonical_filename(file_path)
-            if self.omit_match and self.omit_match.match(file_path):
-                # Turns out this file was omitted, so don't pull it back
-                # in as unexecuted.
-                continue
-            self.data.touch_file(file_path, plugin_name)
+            self._warn("The '[run] note' setting is no longer supported.")
 
     # Backward compatibility with version 1.
     def analysis(self, morf):
@@ -941,7 +747,6 @@
         coverage data.
 
         """
-        self._init()
         analysis = self._analyze(morf)
         return (
             analysis.filename,
@@ -957,11 +762,16 @@
         Returns an `Analysis` object.
 
         """
-        self.get_data()
+        # All reporting comes through here, so do reporting initialization.
+        self._init()
+        Numbers.set_precision(self.config.precision)
+        self._post_init()
+
+        data = self.get_data()
         if not isinstance(it, FileReporter):
             it = self._get_file_reporter(it)
 
-        return Analysis(self.data, it)
+        return Analysis(data, it, self._file_mapper)
 
     def _get_file_reporter(self, morf):
         """Get a FileReporter for a module or file name."""
@@ -969,19 +779,19 @@
         file_reporter = "python"
 
         if isinstance(morf, string_class):
-            abs_morf = abs_file(morf)
-            plugin_name = self.data.file_tracer(abs_morf)
+            mapped_morf = self._file_mapper(morf)
+            plugin_name = self._data.file_tracer(mapped_morf)
             if plugin_name:
-                plugin = self.plugins.get(plugin_name)
+                plugin = self._plugins.get(plugin_name)
 
-        if plugin:
-            file_reporter = plugin.file_reporter(abs_morf)
-            if file_reporter is None:
-                raise CoverageException(
-                    "Plugin %r did not provide a file reporter for %r." % (
-                        plugin._coverage_plugin_name, morf
-                    )
-                )
+                if plugin:
+                    file_reporter = plugin.file_reporter(mapped_morf)
+                    if file_reporter is None:
+                        raise CoverageException(
+                            "Plugin %r did not provide a file reporter for %r." % (
+                                plugin._coverage_plugin_name, morf
+                            )
+                        )
 
         if file_reporter == "python":
             file_reporter = PythonFileReporter(morf, self)
@@ -1000,49 +810,70 @@
 
         """
         if not morfs:
-            morfs = self.data.measured_files()
+            morfs = self._data.measured_files()
 
-        # Be sure we have a list.
-        if not isinstance(morfs, (list, tuple)):
+        # Be sure we have a collection.
+        if not isinstance(morfs, (list, tuple, set)):
             morfs = [morfs]
 
-        file_reporters = []
-        for morf in morfs:
-            file_reporter = self._get_file_reporter(morf)
-            file_reporters.append(file_reporter)
-
+        file_reporters = [self._get_file_reporter(morf) for morf in morfs]
         return file_reporters
 
     def report(
         self, morfs=None, show_missing=None, ignore_errors=None,
-        file=None,                  # pylint: disable=redefined-builtin
-        omit=None, include=None, skip_covered=None,
+        file=None, omit=None, include=None, skip_covered=None,
+        contexts=None, skip_empty=None,
     ):
-        """Write a summary report to `file`.
+        """Write a textual summary report to `file`.
 
         Each module in `morfs` is listed, with counts of statements, executed
         statements, missing statements, and a list of lines missed.
 
+        If `show_missing` is true, then details of which lines or branches are
+        missing will be included in the report.  If `ignore_errors` is true,
+        then a failure while reporting a single file will not stop the entire
+        report.
+
+        `file` is a file-like object, suitable for writing.
+
         `include` is a list of file name patterns.  Files that match will be
         included in the report. Files matching `omit` will not be included in
         the report.
 
-        If `skip_covered` is True, don't report on files with 100% coverage.
+        If `skip_covered` is true, don't report on files with 100% coverage.
+
+        If `skip_empty` is true, don't report on empty files (those that have
+        no statements).
+
+        `contexts` is a list of regular expressions.  Only data from
+        :ref:`dynamic contexts <dynamic_contexts>` that match one of those
+        expressions (using :func:`re.search <python:re.search>`) will be
+        included in the report.
+
+        All of the arguments default to the settings read from the
+        :ref:`configuration file <config>`.
 
         Returns a float, the total percentage covered.
 
+        .. versionadded:: 4.0
+            The `skip_covered` parameter.
+
+        .. versionadded:: 5.0
+            The `contexts` and `skip_empty` parameters.
+
         """
-        self.get_data()
-        self.config.from_args(
+        with override_config(
+            self,
             ignore_errors=ignore_errors, report_omit=omit, report_include=include,
             show_missing=show_missing, skip_covered=skip_covered,
-            )
-        reporter = SummaryReporter(self, self.config)
-        return reporter.report(morfs, outfile=file)
+            report_contexts=contexts, skip_empty=skip_empty,
+        ):
+            reporter = SummaryReporter(self)
+            return reporter.report(morfs, outfile=file)
 
     def annotate(
         self, morfs=None, directory=None, ignore_errors=None,
-        omit=None, include=None,
+        omit=None, include=None, contexts=None,
     ):
         """Annotate a list of modules.
 
@@ -1054,16 +885,17 @@
         See :meth:`report` for other arguments.
 
         """
-        self.get_data()
-        self.config.from_args(
-            ignore_errors=ignore_errors, report_omit=omit, report_include=include
-            )
-        reporter = AnnotateReporter(self, self.config)
-        reporter.report(morfs, directory=directory)
+        with override_config(self,
+            ignore_errors=ignore_errors, report_omit=omit,
+            report_include=include, report_contexts=contexts,
+        ):
+            reporter = AnnotateReporter(self)
+            reporter.report(morfs, directory=directory)
 
     def html_report(self, morfs=None, directory=None, ignore_errors=None,
                     omit=None, include=None, extra_css=None, title=None,
-                    skip_covered=None):
+                    skip_covered=None, show_contexts=None, contexts=None,
+                    skip_empty=None):
         """Generate an HTML report.
 
         The HTML is written to `directory`.  The file "index.html" is the
@@ -1080,19 +912,25 @@
 
         Returns a float, the total percentage covered.
 
+        .. note::
+            The HTML report files are generated incrementally based on the
+            source files and coverage results. If you modify the report files,
+            the changes will not be considered.  You should be careful about
+            changing the files in the report folder.
+
         """
-        self.get_data()
-        self.config.from_args(
+        with override_config(self,
             ignore_errors=ignore_errors, report_omit=omit, report_include=include,
             html_dir=directory, extra_css=extra_css, html_title=title,
-            skip_covered=skip_covered,
-            )
-        reporter = HtmlReporter(self, self.config)
-        return reporter.report(morfs)
+            skip_covered=skip_covered, show_contexts=show_contexts, report_contexts=contexts,
+            skip_empty=skip_empty,
+        ):
+            reporter = HtmlReporter(self)
+            return reporter.report(morfs)
 
     def xml_report(
         self, morfs=None, outfile=None, ignore_errors=None,
-        omit=None, include=None,
+        omit=None, include=None, contexts=None,
     ):
         """Generate an XML report of coverage results.
 
@@ -1106,40 +944,35 @@
         Returns a float, the total percentage covered.
 
         """
-        self.get_data()
-        self.config.from_args(
+        with override_config(self,
             ignore_errors=ignore_errors, report_omit=omit, report_include=include,
-            xml_output=outfile,
-            )
-        file_to_close = None
-        delete_file = False
-        if self.config.xml_output:
-            if self.config.xml_output == '-':
-                outfile = sys.stdout
-            else:
-                # Ensure that the output directory is created; done here
-                # because this report pre-opens the output file.
-                # HTMLReport does this using the Report plumbing because
-                # its task is more complex, being multiple files.
-                output_dir = os.path.dirname(self.config.xml_output)
-                if output_dir and not os.path.isdir(output_dir):
-                    os.makedirs(output_dir)
-                open_kwargs = {}
-                if env.PY3:
-                    open_kwargs['encoding'] = 'utf8'
-                outfile = open(self.config.xml_output, "w", **open_kwargs)
-                file_to_close = outfile
-        try:
-            reporter = XmlReporter(self, self.config)
-            return reporter.report(morfs, outfile=outfile)
-        except CoverageException:
-            delete_file = True
-            raise
-        finally:
-            if file_to_close:
-                file_to_close.close()
-                if delete_file:
-                    file_be_gone(self.config.xml_output)
+            xml_output=outfile, report_contexts=contexts,
+        ):
+            return render_report(self.config.xml_output, XmlReporter(self), morfs)
+
+    def json_report(
+        self, morfs=None, outfile=None, ignore_errors=None,
+        omit=None, include=None, contexts=None, pretty_print=None,
+        show_contexts=None
+    ):
+        """Generate a JSON report of coverage results.
+
+        Each module in `morfs` is included in the report.  `outfile` is the
+        path to write the file to, "-" will write to stdout.
+
+        See :meth:`report` for other arguments.
+
+        Returns a float, the total percentage covered.
+
+        .. versionadded:: 5.0
+
+        """
+        with override_config(self,
+            ignore_errors=ignore_errors, report_omit=omit, report_include=include,
+            json_output=outfile, report_contexts=contexts, json_pretty_print=pretty_print,
+            json_show_contexts=show_contexts
+        ):
+            return render_report(self.config.json_output, JsonReporter(self), morfs)
 
     def sys_info(self):
         """Return a list of (key, value) pairs showing internal information."""
@@ -1147,6 +980,7 @@
         import coverage as covmod
 
         self._init()
+        self._post_init()
 
         def plugin_info(plugins):
             """Make an entry for the sys_info from a list of plug-ins."""
@@ -1161,84 +995,51 @@
         info = [
             ('version', covmod.__version__),
             ('coverage', covmod.__file__),
-            ('cover_paths', self.cover_paths),
-            ('pylib_paths', self.pylib_paths),
-            ('tracer', self.collector.tracer_name()),
-            ('plugins.file_tracers', plugin_info(self.plugins.file_tracers)),
-            ('plugins.configurers', plugin_info(self.plugins.configurers)),
-            ('config_files', self.config.attempted_config_files),
-            ('configs_read', self.config.config_files),
-            ('data_path', self.data_files.filename),
+            ('tracer', self._collector.tracer_name() if self._collector else "-none-"),
+            ('CTracer', 'available' if CTracer else "unavailable"),
+            ('plugins.file_tracers', plugin_info(self._plugins.file_tracers)),
+            ('plugins.configurers', plugin_info(self._plugins.configurers)),
+            ('plugins.context_switchers', plugin_info(self._plugins.context_switchers)),
+            ('configs_attempted', self.config.attempted_config_files),
+            ('configs_read', self.config.config_files_read),
+            ('config_file', self.config.config_file),
+            ('config_contents',
+                repr(self.config._config_contents)
+                if self.config._config_contents
+                else '-none-'
+            ),
+            ('data_file', self._data.data_filename() if self._data is not None else "-none-"),
             ('python', sys.version.replace('\n', '')),
             ('platform', platform.platform()),
             ('implementation', platform.python_implementation()),
             ('executable', sys.executable),
+            ('def_encoding', sys.getdefaultencoding()),
+            ('fs_encoding', sys.getfilesystemencoding()),
+            ('pid', os.getpid()),
             ('cwd', os.getcwd()),
             ('path', sys.path),
             ('environment', sorted(
                 ("%s = %s" % (k, v))
                 for k, v in iitems(os.environ)
-                if k.startswith(("COV", "PY"))
+                if any(slug in k for slug in ("COV", "PY"))
             )),
-            ('command_line', " ".join(getattr(sys, 'argv', ['???']))),
+            ('command_line', " ".join(getattr(sys, 'argv', ['-none-']))),
             ]
 
-        matcher_names = [
-            'source_match', 'source_pkgs_match',
-            'include_match', 'omit_match',
-            'cover_match', 'pylib_match',
-            ]
+        if self._inorout:
+            info.extend(self._inorout.sys_info())
 
-        for matcher_name in matcher_names:
-            matcher = getattr(self, matcher_name)
-            if matcher:
-                matcher_info = matcher.info()
-            else:
-                matcher_info = '-none-'
-            info.append((matcher_name, matcher_info))
+        info.extend(CoverageData.sys_info())
 
         return info
 
 
-def module_is_namespace(mod):
-    """Is the module object `mod` a PEP420 namespace module?"""
-    return hasattr(mod, '__path__') and getattr(mod, '__file__', None) is None
-
-
-def module_has_file(mod):
-    """Does the module object `mod` have an existing __file__ ?"""
-    mod__file__ = getattr(mod, '__file__', None)
-    if mod__file__ is None:
-        return False
-    return os.path.exists(mod__file__)
-
-
-# FileDisposition "methods": FileDisposition is a pure value object, so it can
-# be implemented in either C or Python.  Acting on them is done with these
-# functions.
+# Mega debugging...
+# $set_env.py: COVERAGE_DEBUG_CALLS - Lots and lots of output about calls to Coverage.
+if int(os.environ.get("COVERAGE_DEBUG_CALLS", 0)):              # pragma: debugging
+    from coverage.debug import decorate_methods, show_calls
 
-def _disposition_init(cls, original_filename):
-    """Construct and initialize a new FileDisposition object."""
-    disp = cls()
-    disp.original_filename = original_filename
-    disp.canonical_filename = original_filename
-    disp.source_filename = None
-    disp.trace = False
-    disp.reason = ""
-    disp.file_tracer = None
-    disp.has_dynamic_filename = False
-    return disp
-
-
-def _disposition_debug_msg(disp):
-    """Make a nice debug message of what the FileDisposition is doing."""
-    if disp.trace:
-        msg = "Tracing %r" % (disp.original_filename,)
-        if disp.file_tracer:
-            msg += ": will be traced by %r" % disp.file_tracer
-    else:
-        msg = "Not tracing %r: %s" % (disp.original_filename, disp.reason)
-    return msg
+    Coverage = decorate_methods(show_calls(show_args=True), butnot=['get_data'])(Coverage)
 
 
 def process_startup():
@@ -1286,10 +1087,11 @@
 
     cov = Coverage(config_file=cps)
     process_startup.coverage = cov
-    cov.start()
     cov._warn_no_data = False
     cov._warn_unimported_source = False
+    cov._warn_preimported_source = False
     cov._auto_save = True
+    cov.start()
 
     return cov
 
--- a/eric6/DebugClients/Python/coverage/data.py	Wed Feb 19 19:38:36 2020 +0100
+++ b/eric6/DebugClients/Python/coverage/data.py	Sat Feb 22 14:27:42 2020 +0100
@@ -1,794 +1,124 @@
 # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
+
+"""Coverage data for coverage.py.
 
-"""Coverage data for coverage.py."""
+This file had the 4.x JSON data support, which is now gone.  This file still
+has storage-agnostic helpers, and is kept to avoid changing too many imports.
+CoverageData is now defined in sqldata.py, and imported here to keep the
+imports working.
+
+"""
 
 import glob
-import itertools
-import json
-import optparse
-import os
 import os.path
-import random
-import re
-import socket
 
-from coverage import env
-from coverage.backward import iitems, string_class
-from coverage.debug import _TEST_NAME_FILE
-from coverage.files import PathAliases
-from coverage.misc import CoverageException, file_be_gone, isolate_module
-
-os = isolate_module(os)
+from coverage.misc import CoverageException, file_be_gone
+from coverage.sqldata import CoverageData
 
 
-class CoverageData(object):
-    """Manages collected coverage data, including file storage.
-
-    This class is the public supported API to the data coverage.py collects
-    during program execution.  It includes information about what code was
-    executed. It does not include information from the analysis phase, to
-    determine what lines could have been executed, or what lines were not
-    executed.
+def line_counts(data, fullpath=False):
+    """Return a dict summarizing the line coverage data.
 
-    .. note::
+    Keys are based on the file names, and values are the number of executed
+    lines.  If `fullpath` is true, then the keys are the full pathnames of
+    the files, otherwise they are the basenames of the files.
 
-        The file format is not documented or guaranteed.  It will change in
-        the future, in possibly complicated ways.  Do not read coverage.py
-        data files directly.  Use this API to avoid disruption.
-
-    There are a number of kinds of data that can be collected:
-
-    * **lines**: the line numbers of source lines that were executed.
-      These are always available.
+    Returns a dict mapping file names to counts of lines.
 
-    * **arcs**: pairs of source and destination line numbers for transitions
-      between source lines.  These are only available if branch coverage was
-      used.
-
-    * **file tracer names**: the module names of the file tracer plugins that
-      handled each file in the data.
-
-    * **run information**: information about the program execution.  This is
-      written during "coverage run", and then accumulated during "coverage
-      combine".
-
-    Lines, arcs, and file tracer names are stored for each source file. File
-    names in this API are case-sensitive, even on platforms with
-    case-insensitive file systems.
-
-    To read a coverage.py data file, use :meth:`read_file`, or
-    :meth:`read_fileobj` if you have an already-opened file.  You can then
-    access the line, arc, or file tracer data with :meth:`lines`, :meth:`arcs`,
-    or :meth:`file_tracer`.  Run information is available with
-    :meth:`run_infos`.
-
-    The :meth:`has_arcs` method indicates whether arc data is available.  You
-    can get a list of the files in the data with :meth:`measured_files`.
-    A summary of the line data is available from :meth:`line_counts`.  As with
-    most Python containers, you can determine if there is any data at all by
-    using this object as a boolean value.
+    """
+    summ = {}
+    if fullpath:
+        filename_fn = lambda f: f
+    else:
+        filename_fn = os.path.basename
+    for filename in data.measured_files():
+        summ[filename_fn(filename)] = len(data.lines(filename))
+    return summ
 
 
-    Most data files will be created by coverage.py itself, but you can use
-    methods here to create data files if you like.  The :meth:`add_lines`,
-    :meth:`add_arcs`, and :meth:`add_file_tracers` methods add data, in ways
-    that are convenient for coverage.py.  The :meth:`add_run_info` method adds
-    key-value pairs to the run information.
+def add_data_to_hash(data, filename, hasher):
+    """Contribute `filename`'s data to the `hasher`.
 
-    To add a file without any measured data, use :meth:`touch_file`.
-
-    You write to a named file with :meth:`write_file`, or to an already opened
-    file with :meth:`write_fileobj`.
-
-    You can clear the data in memory with :meth:`erase`.  Two data collections
-    can be combined by using :meth:`update` on one :class:`CoverageData`,
-    passing it the other.
+    `hasher` is a `coverage.misc.Hasher` instance to be updated with
+    the file's data.  It should only get the results data, not the run
+    data.
 
     """
-
-    # The data file format is JSON, with these keys:
-    #
-    #     * lines: a dict mapping file names to lists of line numbers
-    #       executed::
-    #
-    #         { "file1": [17,23,45], "file2": [1,2,3], ... }
-    #
-    #     * arcs: a dict mapping file names to lists of line number pairs::
-    #
-    #         { "file1": [[17,23], [17,25], [25,26]], ... }
-    #
-    #     * file_tracers: a dict mapping file names to plugin names::
-    #
-    #         { "file1": "django.coverage", ... }
-    #
-    #     * runs: a list of dicts of information about the coverage.py runs
-    #       contributing to the data::
-    #
-    #         [ { "brief_sys": "CPython 2.7.10 Darwin" }, ... ]
-    #
-    # Only one of `lines` or `arcs` will be present: with branch coverage, data
-    # is stored as arcs. Without branch coverage, it is stored as lines.  The
-    # line data is easily recovered from the arcs: it is all the first elements
-    # of the pairs that are greater than zero.
-
-    def __init__(self, debug=None):
-        """Create a CoverageData.
-
-        `debug` is a `DebugControl` object for writing debug messages.
-
-        """
-        self._debug = debug
-
-        # A map from canonical Python source file name to a dictionary in
-        # which there's an entry for each line number that has been
-        # executed:
-        #
-        #   { 'filename1.py': [12, 47, 1001], ... }
-        #
-        self._lines = None
-
-        # A map from canonical Python source file name to a dictionary with an
-        # entry for each pair of line numbers forming an arc:
-        #
-        #   { 'filename1.py': [(12,14), (47,48), ... ], ... }
-        #
-        self._arcs = None
-
-        # A map from canonical source file name to a plugin module name:
-        #
-        #   { 'filename1.py': 'django.coverage', ... }
-        #
-        self._file_tracers = {}
-
-        # A list of dicts of information about the coverage.py runs.
-        self._runs = []
-
-    def __repr__(self):
-        return "<{klass} lines={lines} arcs={arcs} tracers={tracers} runs={runs}>".format(
-            klass=self.__class__.__name__,
-            lines="None" if self._lines is None else "{{{0}}}".format(len(self._lines)),
-            arcs="None" if self._arcs is None else "{{{0}}}".format(len(self._arcs)),
-            tracers="{{{0}}}".format(len(self._file_tracers)),
-            runs="[{0}]".format(len(self._runs)),
-        )
-
-    ##
-    ## Reading data
-    ##
-
-    def has_arcs(self):
-        """Does this data have arcs?
-
-        Arc data is only available if branch coverage was used during
-        collection.
-
-        Returns a boolean.
-
-        """
-        return self._has_arcs()
-
-    def lines(self, filename):
-        """Get the list of lines executed for a file.
-
-        If the file was not measured, returns None.  A file might be measured,
-        and have no lines executed, in which case an empty list is returned.
-
-        If the file was executed, returns a list of integers, the line numbers
-        executed in the file. The list is in no particular order.
-
-        """
-        if self._arcs is not None:
-            arcs = self._arcs.get(filename)
-            if arcs is not None:
-                all_lines = itertools.chain.from_iterable(arcs)
-                return list(set(l for l in all_lines if l > 0))
-        elif self._lines is not None:
-            return self._lines.get(filename)
-        return None
-
-    def arcs(self, filename):
-        """Get the list of arcs executed for a file.
-
-        If the file was not measured, returns None.  A file might be measured,
-        and have no arcs executed, in which case an empty list is returned.
-
-        If the file was executed, returns a list of 2-tuples of integers. Each
-        pair is a starting line number and an ending line number for a
-        transition from one line to another. The list is in no particular
-        order.
-
-        Negative numbers have special meaning.  If the starting line number is
-        -N, it represents an entry to the code object that starts at line N.
-        If the ending ling number is -N, it's an exit from the code object that
-        starts at line N.
-
-        """
-        if self._arcs is not None:
-            if filename in self._arcs:
-                return self._arcs[filename]
-        return None
-
-    def file_tracer(self, filename):
-        """Get the plugin name of the file tracer for a file.
-
-        Returns the name of the plugin that handles this file.  If the file was
-        measured, but didn't use a plugin, then "" is returned.  If the file
-        was not measured, then None is returned.
-
-        """
-        # Because the vast majority of files involve no plugin, we don't store
-        # them explicitly in self._file_tracers.  Check the measured data
-        # instead to see if it was a known file with no plugin.
-        if filename in (self._arcs or self._lines or {}):
-            return self._file_tracers.get(filename, "")
-        return None
-
-    def run_infos(self):
-        """Return the list of dicts of run information.
-
-        For data collected during a single run, this will be a one-element
-        list.  If data has been combined, there will be one element for each
-        original data file.
-
-        """
-        return self._runs
-
-    def measured_files(self):
-        """A list of all files that had been measured."""
-        return list(self._arcs or self._lines or {})
-
-    def line_counts(self, fullpath=False):
-        """Return a dict summarizing the line coverage data.
-
-        Keys are based on the file names, and values are the number of executed
-        lines.  If `fullpath` is true, then the keys are the full pathnames of
-        the files, otherwise they are the basenames of the files.
-
-        Returns a dict mapping file names to counts of lines.
-
-        """
-        summ = {}
-        if fullpath:
-            filename_fn = lambda f: f
-        else:
-            filename_fn = os.path.basename
-        for filename in self.measured_files():
-            summ[filename_fn(filename)] = len(self.lines(filename))
-        return summ
-
-    def __nonzero__(self):
-        return bool(self._lines or self._arcs)
-
-    __bool__ = __nonzero__
-
-    def read_fileobj(self, file_obj):
-        """Read the coverage data from the given file object.
-
-        Should only be used on an empty CoverageData object.
-
-        """
-        data = self._read_raw_data(file_obj)
-
-        self._lines = self._arcs = None
-
-        if 'lines' in data:
-            self._lines = data['lines']
-        if 'arcs' in data:
-            self._arcs = dict(
-                (fname, [tuple(pair) for pair in arcs])
-                for fname, arcs in iitems(data['arcs'])
-            )
-        self._file_tracers = data.get('file_tracers', {})
-        self._runs = data.get('runs', [])
-
-        self._validate()
-
-    def read_file(self, filename):
-        """Read the coverage data from `filename` into this object."""
-        if self._debug and self._debug.should('dataio'):
-            self._debug.write("Reading data from %r" % (filename,))
-        try:
-            with self._open_for_reading(filename) as f:
-                self.read_fileobj(f)
-        except Exception as exc:
-            raise CoverageException(
-                "Couldn't read data from '%s': %s: %s" % (
-                    filename, exc.__class__.__name__, exc,
-                )
-            )
-
-    _GO_AWAY = "!coverage.py: This is a private format, don't read it directly!"
-
-    @classmethod
-    def _open_for_reading(cls, filename):
-        """Open a file appropriately for reading data."""
-        return open(filename, "r")
-
-    @classmethod
-    def _read_raw_data(cls, file_obj):
-        """Read the raw data from a file object."""
-        go_away = file_obj.read(len(cls._GO_AWAY))
-        if go_away != cls._GO_AWAY:
-            raise CoverageException("Doesn't seem to be a coverage.py data file")
-        return json.load(file_obj)
-
-    @classmethod
-    def _read_raw_data_file(cls, filename):
-        """Read the raw data from a file, for debugging."""
-        with cls._open_for_reading(filename) as f:
-            return cls._read_raw_data(f)
-
-    ##
-    ## Writing data
-    ##
-
-    def add_lines(self, line_data):
-        """Add measured line data.
-
-        `line_data` is a dictionary mapping file names to dictionaries::
-
-            { filename: { lineno: None, ... }, ...}
-
-        """
-        if self._debug and self._debug.should('dataop'):
-            self._debug.write("Adding lines: %d files, %d lines total" % (
-                len(line_data), sum(len(lines) for lines in line_data.values())
-            ))
-        if self._has_arcs():
-            raise CoverageException("Can't add lines to existing arc data")
-
-        if self._lines is None:
-            self._lines = {}
-        for filename, linenos in iitems(line_data):
-            if filename in self._lines:
-                new_linenos = set(self._lines[filename])
-                new_linenos.update(linenos)
-                linenos = new_linenos
-            self._lines[filename] = list(linenos)
-
-        self._validate()
-
-    def add_arcs(self, arc_data):
-        """Add measured arc data.
-
-        `arc_data` is a dictionary mapping file names to dictionaries::
-
-            { filename: { (l1,l2): None, ... }, ...}
-
-        """
-        if self._debug and self._debug.should('dataop'):
-            self._debug.write("Adding arcs: %d files, %d arcs total" % (
-                len(arc_data), sum(len(arcs) for arcs in arc_data.values())
-            ))
-        if self._has_lines():
-            raise CoverageException("Can't add arcs to existing line data")
-
-        if self._arcs is None:
-            self._arcs = {}
-        for filename, arcs in iitems(arc_data):
-            if filename in self._arcs:
-                new_arcs = set(self._arcs[filename])
-                new_arcs.update(arcs)
-                arcs = new_arcs
-            self._arcs[filename] = list(arcs)
-
-        self._validate()
-
-    def add_file_tracers(self, file_tracers):
-        """Add per-file plugin information.
-
-        `file_tracers` is { filename: plugin_name, ... }
-
-        """
-        if self._debug and self._debug.should('dataop'):
-            self._debug.write("Adding file tracers: %d files" % (len(file_tracers),))
-
-        existing_files = self._arcs or self._lines or {}
-        for filename, plugin_name in iitems(file_tracers):
-            if filename not in existing_files:
-                raise CoverageException(
-                    "Can't add file tracer data for unmeasured file '%s'" % (filename,)
-                )
-            existing_plugin = self._file_tracers.get(filename)
-            if existing_plugin is not None and plugin_name != existing_plugin:
-                raise CoverageException(
-                    "Conflicting file tracer name for '%s': %r vs %r" % (
-                        filename, existing_plugin, plugin_name,
-                    )
-                )
-            self._file_tracers[filename] = plugin_name
-
-        self._validate()
-
-    def add_run_info(self, **kwargs):
-        """Add information about the run.
-
-        Keywords are arbitrary, and are stored in the run dictionary. Values
-        must be JSON serializable.  You may use this function more than once,
-        but repeated keywords overwrite each other.
-
-        """
-        if self._debug and self._debug.should('dataop'):
-            self._debug.write("Adding run info: %r" % (kwargs,))
-        if not self._runs:
-            self._runs = [{}]
-        self._runs[0].update(kwargs)
-        self._validate()
-
-    def touch_file(self, filename, plugin_name=""):
-        """Ensure that `filename` appears in the data, empty if needed.
-
-        `plugin_name` is the name of the plugin resposible for this file. It is used
-        to associate the right filereporter, etc.
-        """
-        if self._debug and self._debug.should('dataop'):
-            self._debug.write("Touching %r" % (filename,))
-        if not self._has_arcs() and not self._has_lines():
-            raise CoverageException("Can't touch files in an empty CoverageData")
-
-        if self._has_arcs():
-            where = self._arcs
-        else:
-            where = self._lines
-        where.setdefault(filename, [])
-        if plugin_name:
-            # Set the tracer for this file
-            self._file_tracers[filename] = plugin_name
-
-        self._validate()
-
-    def write_fileobj(self, file_obj):
-        """Write the coverage data to `file_obj`."""
-
-        # Create the file data.
-        file_data = {}
-
-        if self._has_arcs():
-            file_data['arcs'] = self._arcs
-
-        if self._has_lines():
-            file_data['lines'] = self._lines
-
-        if self._file_tracers:
-            file_data['file_tracers'] = self._file_tracers
-
-        if self._runs:
-            file_data['runs'] = self._runs
-
-        # Write the data to the file.
-        file_obj.write(self._GO_AWAY)
-        json.dump(file_data, file_obj, separators=(',', ':'))
-
-    def write_file(self, filename):
-        """Write the coverage data to `filename`."""
-        if self._debug and self._debug.should('dataio'):
-            self._debug.write("Writing data to %r" % (filename,))
-        with open(filename, 'w') as fdata:
-            self.write_fileobj(fdata)
-
-    def erase(self):
-        """Erase the data in this object."""
-        self._lines = None
-        self._arcs = None
-        self._file_tracers = {}
-        self._runs = []
-        self._validate()
-
-    def update(self, other_data, aliases=None):
-        """Update this data with data from another `CoverageData`.
-
-        If `aliases` is provided, it's a `PathAliases` object that is used to
-        re-map paths to match the local machine's.
-
-        """
-        if self._has_lines() and other_data._has_arcs():
-            raise CoverageException("Can't combine arc data with line data")
-        if self._has_arcs() and other_data._has_lines():
-            raise CoverageException("Can't combine line data with arc data")
-
-        aliases = aliases or PathAliases()
-
-        # _file_tracers: only have a string, so they have to agree.
-        # Have to do these first, so that our examination of self._arcs and
-        # self._lines won't be confused by data updated from other_data.
-        for filename in other_data.measured_files():
-            other_plugin = other_data.file_tracer(filename)
-            filename = aliases.map(filename)
-            this_plugin = self.file_tracer(filename)
-            if this_plugin is None:
-                if other_plugin:
-                    self._file_tracers[filename] = other_plugin
-            elif this_plugin != other_plugin:
-                raise CoverageException(
-                    "Conflicting file tracer name for '%s': %r vs %r" % (
-                        filename, this_plugin, other_plugin,
-                    )
-                )
-
-        # _runs: add the new runs to these runs.
-        self._runs.extend(other_data._runs)
-
-        # _lines: merge dicts.
-        if other_data._has_lines():
-            if self._lines is None:
-                self._lines = {}
-            for filename, file_lines in iitems(other_data._lines):
-                filename = aliases.map(filename)
-                if filename in self._lines:
-                    lines = set(self._lines[filename])
-                    lines.update(file_lines)
-                    file_lines = list(lines)
-                self._lines[filename] = file_lines
-
-        # _arcs: merge dicts.
-        if other_data._has_arcs():
-            if self._arcs is None:
-                self._arcs = {}
-            for filename, file_arcs in iitems(other_data._arcs):
-                filename = aliases.map(filename)
-                if filename in self._arcs:
-                    arcs = set(self._arcs[filename])
-                    arcs.update(file_arcs)
-                    file_arcs = list(arcs)
-                self._arcs[filename] = file_arcs
-
-        self._validate()
-
-    ##
-    ## Miscellaneous
-    ##
-
-    def _validate(self):
-        """If we are in paranoid mode, validate that everything is right."""
-        if env.TESTING:
-            self._validate_invariants()
-
-    def _validate_invariants(self):
-        """Validate internal invariants."""
-        # Only one of _lines or _arcs should exist.
-        assert not(self._has_lines() and self._has_arcs()), (
-            "Shouldn't have both _lines and _arcs"
-        )
-
-        # _lines should be a dict of lists of ints.
-        if self._has_lines():
-            for fname, lines in iitems(self._lines):
-                assert isinstance(fname, string_class), "Key in _lines shouldn't be %r" % (fname,)
-                assert all(isinstance(x, int) for x in lines), (
-                    "_lines[%r] shouldn't be %r" % (fname, lines)
-                )
-
-        # _arcs should be a dict of lists of pairs of ints.
-        if self._has_arcs():
-            for fname, arcs in iitems(self._arcs):
-                assert isinstance(fname, string_class), "Key in _arcs shouldn't be %r" % (fname,)
-                assert all(isinstance(x, int) and isinstance(y, int) for x, y in arcs), (
-                    "_arcs[%r] shouldn't be %r" % (fname, arcs)
-                )
-
-        # _file_tracers should have only non-empty strings as values.
-        for fname, plugin in iitems(self._file_tracers):
-            assert isinstance(fname, string_class), (
-                "Key in _file_tracers shouldn't be %r" % (fname,)
-            )
-            assert plugin and isinstance(plugin, string_class), (
-                "_file_tracers[%r] shoudn't be %r" % (fname, plugin)
-            )
-
-        # _runs should be a list of dicts.
-        for val in self._runs:
-            assert isinstance(val, dict)
-            for key in val:
-                assert isinstance(key, string_class), "Key in _runs shouldn't be %r" % (key,)
-
-    def add_to_hash(self, filename, hasher):
-        """Contribute `filename`'s data to the `hasher`.
-
-        `hasher` is a `coverage.misc.Hasher` instance to be updated with
-        the file's data.  It should only get the results data, not the run
-        data.
-
-        """
-        if self._has_arcs():
-            hasher.update(sorted(self.arcs(filename) or []))
-        else:
-            hasher.update(sorted(self.lines(filename) or []))
-        hasher.update(self.file_tracer(filename))
-
-    ##
-    ## Internal
-    ##
-
-    def _has_lines(self):
-        """Do we have data in self._lines?"""
-        return self._lines is not None
-
-    def _has_arcs(self):
-        """Do we have data in self._arcs?"""
-        return self._arcs is not None
+    if data.has_arcs():
+        hasher.update(sorted(data.arcs(filename) or []))
+    else:
+        hasher.update(sorted(data.lines(filename) or []))
+    hasher.update(data.file_tracer(filename))
 
 
-class CoverageDataFiles(object):
-    """Manage the use of coverage data files."""
-
-    def __init__(self, basename=None, warn=None, debug=None):
-        """Create a CoverageDataFiles to manage data files.
-
-        `warn` is the warning function to use.
-
-        `basename` is the name of the file to use for storing data.
-
-        `debug` is a `DebugControl` object for writing debug messages.
-
-        """
-        self.warn = warn
-        self.debug = debug
-
-        # Construct the file name that will be used for data storage.
-        self.filename = os.path.abspath(basename or ".coverage")
-
-    def erase(self, parallel=False):
-        """Erase the data from the file storage.
-
-        If `parallel` is true, then also deletes data files created from the
-        basename by parallel-mode.
+def combine_parallel_data(data, aliases=None, data_paths=None, strict=False):
+    """Combine a number of data files together.
 
-        """
-        if self.debug and self.debug.should('dataio'):
-            self.debug.write("Erasing data file %r" % (self.filename,))
-        file_be_gone(self.filename)
-        if parallel:
-            data_dir, local = os.path.split(self.filename)
-            localdot = local + '.*'
-            pattern = os.path.join(os.path.abspath(data_dir), localdot)
-            for filename in glob.glob(pattern):
-                if self.debug and self.debug.should('dataio'):
-                    self.debug.write("Erasing parallel data file %r" % (filename,))
-                file_be_gone(filename)
-
-    def read(self, data):
-        """Read the coverage data."""
-        if os.path.exists(self.filename):
-            data.read_file(self.filename)
-
-    def write(self, data, suffix=None):
-        """Write the collected coverage data to a file.
+    Treat `data.filename` as a file prefix, and combine the data from all
+    of the data files starting with that prefix plus a dot.
 
-        `suffix` is a suffix to append to the base file name. This can be used
-        for multiple or parallel execution, so that many coverage data files
-        can exist simultaneously.  A dot will be used to join the base name and
-        the suffix.
-
-        """
-        filename = self.filename
-        if suffix is True:
-            # If data_suffix was a simple true value, then make a suffix with
-            # plenty of distinguishing information.  We do this here in
-            # `save()` at the last minute so that the pid will be correct even
-            # if the process forks.
-            extra = ""
-            if _TEST_NAME_FILE:                             # pragma: debugging
-                with open(_TEST_NAME_FILE) as f:
-                    test_name = f.read()
-                extra = "." + test_name
-            dice = random.Random(os.urandom(8)).randint(0, 999999)
-            suffix = "%s%s.%s.%06d" % (socket.gethostname(), extra, os.getpid(), dice)
-
-        if suffix:
-            filename += "." + suffix
-        data.write_file(filename)
-
-    def combine_parallel_data(self, data, aliases=None, data_paths=None, strict=False):
-        """Combine a number of data files together.
+    If `aliases` is provided, it's a `PathAliases` object that is used to
+    re-map paths to match the local machine's.
 
-        Treat `self.filename` as a file prefix, and combine the data from all
-        of the data files starting with that prefix plus a dot.
-
-        If `aliases` is provided, it's a `PathAliases` object that is used to
-        re-map paths to match the local machine's.
-
-        If `data_paths` is provided, it is a list of directories or files to
-        combine.  Directories are searched for files that start with
-        `self.filename` plus dot as a prefix, and those files are combined.
-
-        If `data_paths` is not provided, then the directory portion of
-        `self.filename` is used as the directory to search for data files.
-
-        Every data file found and combined is then deleted from disk. If a file
-        cannot be read, a warning will be issued, and the file will not be
-        deleted.
+    If `data_paths` is provided, it is a list of directories or files to
+    combine.  Directories are searched for files that start with
+    `data.filename` plus dot as a prefix, and those files are combined.
 
-        If `strict` is true, and no files are found to combine, an error is
-        raised.
-
-        """
-        # Because of the os.path.abspath in the constructor, data_dir will
-        # never be an empty string.
-        data_dir, local = os.path.split(self.filename)
-        localdot = local + '.*'
-
-        data_paths = data_paths or [data_dir]
-        files_to_combine = []
-        for p in data_paths:
-            if os.path.isfile(p):
-                files_to_combine.append(os.path.abspath(p))
-            elif os.path.isdir(p):
-                pattern = os.path.join(os.path.abspath(p), localdot)
-                files_to_combine.extend(glob.glob(pattern))
-            else:
-                raise CoverageException("Couldn't combine from non-existent path '%s'" % (p,))
+    If `data_paths` is not provided, then the directory portion of
+    `data.filename` is used as the directory to search for data files.
 
-        if strict and not files_to_combine:
-            raise CoverageException("No data to combine")
+    Every data file found and combined is then deleted from disk. If a file
+    cannot be read, a warning will be issued, and the file will not be
+    deleted.
 
-        files_combined = 0
-        for f in files_to_combine:
-            new_data = CoverageData(debug=self.debug)
-            try:
-                new_data.read_file(f)
-            except CoverageException as exc:
-                if self.warn:
-                    # The CoverageException has the file name in it, so just
-                    # use the message as the warning.
-                    self.warn(str(exc))
-            else:
-                data.update(new_data, aliases=aliases)
-                files_combined += 1
-                if self.debug and self.debug.should('dataio'):
-                    self.debug.write("Deleting combined data file %r" % (f,))
-                file_be_gone(f)
-
-        if strict and not files_combined:
-            raise CoverageException("No usable data files")
-
-
-def canonicalize_json_data(data):
-    """Canonicalize our JSON data so it can be compared."""
-    for fname, lines in iitems(data.get('lines', {})):
-        data['lines'][fname] = sorted(lines)
-    for fname, arcs in iitems(data.get('arcs', {})):
-        data['arcs'][fname] = sorted(arcs)
-
-
-def pretty_data(data):
-    """Format data as JSON, but as nicely as possible.
-
-    Returns a string.
+    If `strict` is true, and no files are found to combine, an error is
+    raised.
 
     """
-    # Start with a basic JSON dump.
-    out = json.dumps(data, indent=4, sort_keys=True)
-    # But pairs of numbers shouldn't be split across lines...
-    out = re.sub(r"\[\s+(-?\d+),\s+(-?\d+)\s+]", r"[\1, \2]", out)
-    # Trailing spaces mess with tests, get rid of them.
-    out = re.sub(r"(?m)\s+$", "", out)
-    return out
+    # Because of the os.path.abspath in the constructor, data_dir will
+    # never be an empty string.
+    data_dir, local = os.path.split(data.base_filename())
+    localdot = local + '.*'
 
-
-def debug_main(args):
-    """Dump the raw data from data files.
+    data_paths = data_paths or [data_dir]
+    files_to_combine = []
+    for p in data_paths:
+        if os.path.isfile(p):
+            files_to_combine.append(os.path.abspath(p))
+        elif os.path.isdir(p):
+            pattern = os.path.join(os.path.abspath(p), localdot)
+            files_to_combine.extend(glob.glob(pattern))
+        else:
+            raise CoverageException("Couldn't combine from non-existent path '%s'" % (p,))
 
-    Run this as::
-
-        $ python -m coverage.data [FILE]
+    if strict and not files_to_combine:
+        raise CoverageException("No data to combine")
 
-    """
-    parser = optparse.OptionParser()
-    parser.add_option(
-        "-c", "--canonical", action="store_true",
-        help="Sort data into a canonical order",
-    )
-    options, args = parser.parse_args(args)
+    files_combined = 0
+    for f in files_to_combine:
+        if f == data.data_filename():
+            # Sometimes we are combining into a file which is one of the
+            # parallel files.  Skip that file.
+            if data._debug.should('dataio'):
+                data._debug.write("Skipping combining ourself: %r" % (f,))
+            continue
+        if data._debug.should('dataio'):
+            data._debug.write("Combining data file %r" % (f,))
+        try:
+            new_data = CoverageData(f, debug=data._debug)
+            new_data.read()
+        except CoverageException as exc:
+            if data._warn:
+                # The CoverageException has the file name in it, so just
+                # use the message as the warning.
+                data._warn(str(exc))
+        else:
+            data.update(new_data, aliases=aliases)
+            files_combined += 1
+            if data._debug.should('dataio'):
+                data._debug.write("Deleting combined data file %r" % (f,))
+            file_be_gone(f)
 
-    for filename in (args or [".coverage"]):
-        print("--- {0} ------------------------------".format(filename))
-        data = CoverageData._read_raw_data_file(filename)
-        if options.canonical:
-            canonicalize_json_data(data)
-        print(pretty_data(data))
-
-
-if __name__ == '__main__':
-    import sys
-    debug_main(sys.argv[1:])
+    if strict and not files_combined:
+        raise CoverageException("No usable data files")
--- a/eric6/DebugClients/Python/coverage/debug.py	Wed Feb 19 19:38:36 2020 +0100
+++ b/eric6/DebugClients/Python/coverage/debug.py	Sat Feb 22 14:27:42 2020 +0100
@@ -1,19 +1,21 @@
 # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
 
 """Control of and utilities for debugging."""
 
 import contextlib
+import functools
 import inspect
+import itertools
 import os
-import re
+import pprint
 import sys
 try:
     import _thread
 except ImportError:
     import thread as _thread
 
-from coverage.backward import StringIO
+from coverage.backward import reprlib, StringIO
 from coverage.misc import isolate_module
 
 os = isolate_module(os)
@@ -23,28 +25,28 @@
 # debugging the configuration mechanisms you usually use to control debugging!
 # This is a list of forced debugging options.
 FORCED_DEBUG = []
-
-# A hack for debugging testing in sub-processes.
-_TEST_NAME_FILE = ""    # "/tmp/covtest.txt"
+FORCED_DEBUG_FILE = None
 
 
 class DebugControl(object):
     """Control and output for debugging."""
 
+    show_repr_attr = False      # For SimpleReprMixin
+
     def __init__(self, options, output):
         """Configure the options and output file for debugging."""
         self.options = list(options) + FORCED_DEBUG
-        self.raw_output = output
         self.suppress_callers = False
 
         filters = []
         if self.should('pid'):
             filters.append(add_pid_and_tid)
-        self.output = DebugOutputFile(
-            self.raw_output,
+        self.output = DebugOutputFile.get_one(
+            output,
             show_process=self.should('process'),
             filters=filters,
         )
+        self.raw_output = self.output.outfile
 
     def __repr__(self):
         return "<DebugControl options=%r raw_output=%r>" % (self.options, self.raw_output)
@@ -72,6 +74,10 @@
 
         """
         self.output.write(msg+"\n")
+        if self.should('self'):
+            caller_self = inspect.stack()[1][0].f_locals.get('self')
+            if caller_self is not None:
+                self.output.write("self: {!r}\n".format(caller_self))
         if self.should('callers'):
             dump_stack_frames(out=self.output, skip=1)
         self.output.flush()
@@ -87,9 +93,16 @@
         return self.raw_output.getvalue()
 
 
+class NoDebugging(object):
+    """A replacement for DebugControl that will never try to do anything."""
+    def should(self, option):               # pylint: disable=unused-argument
+        """Should we write debug messages?  Never."""
+        return False
+
+
 def info_header(label):
     """Make a nice header string."""
-    return "--{0:-<60s}".format(" "+label+" ")
+    return "--{:-<60s}".format(" "+label+" ")
 
 
 def info_formatter(info):
@@ -102,7 +115,8 @@
     info = list(info)
     if not info:
         return
-    label_len = max(len(l) for l, _d in info)
+    label_len = 30
+    assert all(len(l) < label_len for l, _ in info)
     for label, data in info:
         if data == []:
             data = "-none-"
@@ -141,7 +155,7 @@
 
     """
     stack = inspect.stack()[limit:skip:-1]
-    return "\n".join("%30s : %s @%d" % (t[3], t[1], t[2]) for t in stack)
+    return "\n".join("%30s : %s:%d" % (t[3], t[1], t[2]) for t in stack)
 
 
 def dump_stack_frames(limit=None, out=None, skip=0):
@@ -151,6 +165,13 @@
     out.write("\n")
 
 
+def clipped_repr(text, numchars=50):
+    """`repr(text)`, but limited to `numchars`."""
+    r = reprlib.Repr()
+    r.maxstring = numchars
+    return r.repr(text)
+
+
 def short_id(id64):
     """Given a 64-bit id, make a shorter 16-bit one."""
     id16 = 0
@@ -162,11 +183,47 @@
 def add_pid_and_tid(text):
     """A filter to add pid and tid to debug messages."""
     # Thread ids are useful, but too long. Make a shorter one.
-    tid = "{0:04x}".format(short_id(_thread.get_ident()))
-    text = "{0:5d}.{1}: {2}".format(os.getpid(), tid, text)
+    tid = "{:04x}".format(short_id(_thread.get_ident()))
+    text = "{:5d}.{}: {}".format(os.getpid(), tid, text)
     return text
 
 
+class SimpleReprMixin(object):
+    """A mixin implementing a simple __repr__."""
+    simple_repr_ignore = ['simple_repr_ignore', '$coverage.object_id']
+
+    def __repr__(self):
+        show_attrs = (
+            (k, v) for k, v in self.__dict__.items()
+            if getattr(v, "show_repr_attr", True)
+            and not callable(v)
+            and k not in self.simple_repr_ignore
+        )
+        return "<{klass} @0x{id:x} {attrs}>".format(
+            klass=self.__class__.__name__,
+            id=id(self),
+            attrs=" ".join("{}={!r}".format(k, v) for k, v in show_attrs),
+            )
+
+
+def simplify(v):                                            # pragma: debugging
+    """Turn things which are nearly dict/list/etc into dict/list/etc."""
+    if isinstance(v, dict):
+        return {k:simplify(vv) for k, vv in v.items()}
+    elif isinstance(v, (list, tuple)):
+        return type(v)(simplify(vv) for vv in v)
+    elif hasattr(v, "__dict__"):
+        return simplify({'.'+k: v for k, v in v.__dict__.items()})
+    else:
+        return v
+
+
+def pp(v):                                                  # pragma: debugging
+    """Debug helper to pretty-print data, including SimpleNamespace objects."""
+    # Might not be needed in 3.9+
+    pprint.pprint(simplify(v))
+
+
 def filter_text(text, filters):
     """Run `text` through a series of filters.
 
@@ -197,7 +254,7 @@
         """Add a cwd message for each new cwd."""
         cwd = os.getcwd()
         if cwd != self.cwd:
-            text = "cwd is now {0!r}\n".format(cwd) + text
+            text = "cwd is now {!r}\n".format(cwd) + text
             self.cwd = cwd
         return text
 
@@ -210,32 +267,51 @@
         self.filters = list(filters)
 
         if self.show_process:
-            self.filters.append(CwdTracker().filter)
-            cmd = " ".join(getattr(sys, 'argv', ['???']))
-            self.write("New process: executable: %s\n" % (sys.executable,))
-            self.write("New process: cmd: %s\n" % (cmd,))
+            self.filters.insert(0, CwdTracker().filter)
+            self.write("New process: executable: %r\n" % (sys.executable,))
+            self.write("New process: cmd: %r\n" % (getattr(sys, 'argv', None),))
             if hasattr(os, 'getppid'):
-                self.write("New process: parent pid: %s\n" % (os.getppid(),))
+                self.write("New process: pid: %r, parent pid: %r\n" % (os.getpid(), os.getppid()))
 
     SYS_MOD_NAME = '$coverage.debug.DebugOutputFile.the_one'
 
     @classmethod
-    def the_one(cls, fileobj=None, show_process=True, filters=()):
-        """Get the process-wide singleton DebugOutputFile.
+    def get_one(cls, fileobj=None, show_process=True, filters=(), interim=False):
+        """Get a DebugOutputFile.
+
+        If `fileobj` is provided, then a new DebugOutputFile is made with it.
 
-        If it doesn't exist yet, then create it as a wrapper around the file
-        object `fileobj`. `show_process` controls whether the debug file adds
-        process-level information.
+        If `fileobj` isn't provided, then a file is chosen
+        (COVERAGE_DEBUG_FILE, or stderr), and a process-wide singleton
+        DebugOutputFile is made.
+
+        `show_process` controls whether the debug file adds process-level
+        information, and filters is a list of other message filters to apply.
+
+        `filters` are the text filters to apply to the stream to annotate with
+        pids, etc.
+
+        If `interim` is true, then a future `get_one` can replace this one.
 
         """
+        if fileobj is not None:
+            # Make DebugOutputFile around the fileobj passed.
+            return cls(fileobj, show_process, filters)
+
         # Because of the way igor.py deletes and re-imports modules,
         # this class can be defined more than once. But we really want
         # a process-wide singleton. So stash it in sys.modules instead of
         # on a class attribute. Yes, this is aggressively gross.
-        the_one = sys.modules.get(cls.SYS_MOD_NAME)
-        if the_one is None:
-            assert fileobj is not None
-            sys.modules[cls.SYS_MOD_NAME] = the_one = cls(fileobj, show_process, filters)
+        the_one, is_interim = sys.modules.get(cls.SYS_MOD_NAME, (None, True))
+        if the_one is None or is_interim:
+            if fileobj is None:
+                debug_file_name = os.environ.get("COVERAGE_DEBUG_FILE", FORCED_DEBUG_FILE)
+                if debug_file_name:
+                    fileobj = open(debug_file_name, "a")
+                else:
+                    fileobj = sys.stderr
+            the_one = cls(fileobj, show_process, filters)
+            sys.modules[cls.SYS_MOD_NAME] = (the_one, interim)
         return the_one
 
     def write(self, text):
@@ -250,46 +326,81 @@
 
 def log(msg, stack=False):                                  # pragma: debugging
     """Write a log message as forcefully as possible."""
-    out = DebugOutputFile.the_one()
+    out = DebugOutputFile.get_one(interim=True)
     out.write(msg+"\n")
     if stack:
         dump_stack_frames(out=out, skip=1)
 
 
-def filter_aspectlib_frames(text):                          # pragma: debugging
-    """Aspectlib prints stack traces, but includes its own frames.  Scrub those out."""
-    # <<< aspectlib/__init__.py:257:function_wrapper < igor.py:143:run_tests < ...
-    text = re.sub(r"(?<= )aspectlib/[^.]+\.py:\d+:\w+ < ", "", text)
-    return text
+def decorate_methods(decorator, butnot=(), private=False):  # pragma: debugging
+    """A class decorator to apply a decorator to methods."""
+    def _decorator(cls):
+        for name, meth in inspect.getmembers(cls, inspect.isroutine):
+            if name not in cls.__dict__:
+                continue
+            if name != "__init__":
+                if not private and name.startswith("_"):
+                    continue
+            if name in butnot:
+                continue
+            setattr(cls, name, decorator(meth))
+        return cls
+    return _decorator
+
+
+def break_in_pudb(func):                                    # pragma: debugging
+    """A function decorator to stop in the debugger for each call."""
+    @functools.wraps(func)
+    def _wrapper(*args, **kwargs):
+        import pudb
+        sys.stdout = sys.__stdout__
+        pudb.set_trace()
+        return func(*args, **kwargs)
+    return _wrapper
 
 
-def enable_aspectlib_maybe():                               # pragma: debugging
-    """For debugging, we can use aspectlib to trace execution.
-
-    Define COVERAGE_ASPECTLIB to enable and configure aspectlib to trace
-    execution::
-
-        $ export COVERAGE_LOG=covaspect.txt
-        $ export COVERAGE_ASPECTLIB=coverage.Coverage:coverage.data.CoverageData
-        $ coverage run blah.py ...
-
-    This will trace all the public methods on Coverage and CoverageData,
-    writing the information to covaspect.txt.
+OBJ_IDS = itertools.count()
+CALLS = itertools.count()
+OBJ_ID_ATTR = "$coverage.object_id"
 
-    """
-    aspects = os.environ.get("COVERAGE_ASPECTLIB", "")
-    if not aspects:
-        return
-
-    import aspectlib                            # pylint: disable=import-error
-    import aspectlib.debug                      # pylint: disable=import-error
+def show_calls(show_args=True, show_stack=False, show_return=False):    # pragma: debugging
+    """A method decorator to debug-log each call to the function."""
+    def _decorator(func):
+        @functools.wraps(func)
+        def _wrapper(self, *args, **kwargs):
+            oid = getattr(self, OBJ_ID_ATTR, None)
+            if oid is None:
+                oid = "{:08d} {:04d}".format(os.getpid(), next(OBJ_IDS))
+                setattr(self, OBJ_ID_ATTR, oid)
+            extra = ""
+            if show_args:
+                eargs = ", ".join(map(repr, args))
+                ekwargs = ", ".join("{}={!r}".format(*item) for item in kwargs.items())
+                extra += "("
+                extra += eargs
+                if eargs and ekwargs:
+                    extra += ", "
+                extra += ekwargs
+                extra += ")"
+            if show_stack:
+                extra += " @ "
+                extra += "; ".join(_clean_stack_line(l) for l in short_stack().splitlines())
+            callid = next(CALLS)
+            msg = "{} {:04d} {}{}\n".format(oid, callid, func.__name__, extra)
+            DebugOutputFile.get_one(interim=True).write(msg)
+            ret = func(self, *args, **kwargs)
+            if show_return:
+                msg = "{} {:04d} {} return {!r}\n".format(oid, callid, func.__name__, ret)
+                DebugOutputFile.get_one(interim=True).write(msg)
+            return ret
+        return _wrapper
+    return _decorator
 
-    filename = os.environ.get("COVERAGE_LOG", "/tmp/covlog.txt")
-    filters = [add_pid_and_tid, filter_aspectlib_frames]
-    aspects_file = DebugOutputFile.the_one(open(filename, "a"), show_process=True, filters=filters)
-    aspect_log = aspectlib.debug.log(
-        print_to=aspects_file, attributes=['id'], stacktrace=30, use_logging=False
-    )
-    public_methods = re.compile(r'^(__init__|[a-zA-Z].*)$')
-    for aspect in aspects.split(':'):
-        aspectlib.weave(aspect, aspect_log, methods=public_methods)
+
+def _clean_stack_line(s):                                   # pragma: debugging
+    """Simplify some paths in a stack trace, for compactness."""
+    s = s.strip()
+    s = s.replace(os.path.dirname(__file__) + '/', '')
+    s = s.replace(os.path.dirname(os.__file__) + '/', '')
+    s = s.replace(sys.prefix + '/', '')
+    return s
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/eric6/DebugClients/Python/coverage/disposition.py	Sat Feb 22 14:27:42 2020 +0100
@@ -0,0 +1,37 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
+
+"""Simple value objects for tracking what to do with files."""
+
+
+class FileDisposition(object):
+    """A simple value type for recording what to do with a file."""
+    pass
+
+
+# FileDisposition "methods": FileDisposition is a pure value object, so it can
+# be implemented in either C or Python.  Acting on them is done with these
+# functions.
+
+def disposition_init(cls, original_filename):
+    """Construct and initialize a new FileDisposition object."""
+    disp = cls()
+    disp.original_filename = original_filename
+    disp.canonical_filename = original_filename
+    disp.source_filename = None
+    disp.trace = False
+    disp.reason = ""
+    disp.file_tracer = None
+    disp.has_dynamic_filename = False
+    return disp
+
+
+def disposition_debug_msg(disp):
+    """Make a nice debug message of what the FileDisposition is doing."""
+    if disp.trace:
+        msg = "Tracing %r" % (disp.original_filename,)
+        if disp.file_tracer:
+            msg += ": will be traced by %r" % disp.file_tracer
+    else:
+        msg = "Not tracing %r: %s" % (disp.original_filename, disp.reason)
+    return msg
--- a/eric6/DebugClients/Python/coverage/doc/CHANGES.rst	Wed Feb 19 19:38:36 2020 +0100
+++ b/eric6/DebugClients/Python/coverage/doc/CHANGES.rst	Sat Feb 22 14:27:42 2020 +0100
@@ -1,21 +1,520 @@
 .. Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-.. For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+.. For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
 
 ==============================
-Change history for Coverage.py
+Change history for coverage.py
 ==============================
 
+These changes are listed in decreasing version number order. Note this can be
+different from a strict chronological order when there are two branches in
+development at the same time, such as 4.5.x and 5.0.
+
+This list is detailed and covers changes in each pre-release version.  If you
+want to know what's different in 5.0 since 4.5.x, see :ref:`whatsnew5x`.
+
+
     .. When updating the "Unreleased" header to a specific version, use this
     .. format.  Don't forget the jump target:
     ..
-    ..
-    ..  .. _changes_781:
+    ..  .. _changes_981:
     ..
-    ..
-    ..  Version 7.8.1 --- 2021-07-27
+    ..  Version 9.8.1 --- 2027-07-27
     ..  ----------------------------
 
 
+.. _changes_503:
+
+Version 5.0.3 --- 2020-01-12
+----------------------------
+
+- A performance improvement in 5.0.2 didn't work for test suites that changed
+  directory before combining data, causing "Couldn't use data file: no such
+  table: meta" errors (`issue 916`_).  This is now fixed.
+
+- Coverage could fail to run your program with some form of "ModuleNotFound" or
+  "ImportError" trying to import from the current directory. This would happen
+  if coverage had been packaged into a zip file (for example, on Windows), or
+  was found indirectly (for example, by pyenv-virtualenv).  A number of
+  different scenarios were described in `issue 862`_ which is now fixed.  Huge
+  thanks to Agbonze O. Jeremiah for reporting it, and Alexander Waters and
+  George-Cristian Bîrzan for protracted debugging sessions.
+
+- Added the "premain" debug option.
+
+- Added SQLite compile-time options to the "debug sys" output.
+
+.. _issue 862: https://github.com/nedbat/coveragepy/issues/862
+.. _issue 916: https://github.com/nedbat/coveragepy/issues/916
+
+
+.. _changes_502:
+
+Version 5.0.2 --- 2020-01-05
+----------------------------
+
+- Programs that used multiprocessing and changed directories would fail under
+  coverage.  This is now fixed (`issue 890`_).  A side effect is that debug
+  information about the config files read now shows absolute paths to the
+  files.
+
+- When running programs as modules (``coverage run -m``) with ``--source``,
+  some measured modules were imported before coverage starts.  This resulted in
+  unwanted warnings ("Already imported a file that will be measured") and a
+  reduction in coverage totals (`issue 909`_).  This is now fixed.
+
+- If no data was collected, an exception about "No data to report" could happen
+  instead of a 0% report being created (`issue 884`_).  This is now fixed.
+
+- The handling of source files with non-encodable file names has changed.
+  Previously, if a file name could not be encoded as UTF-8, an error occurred,
+  as described in `issue 891`_.  Now, those files will not be measured, since
+  their data would not be recordable.
+
+- A new warning ("dynamic-conflict") is issued if two mechanisms are trying to
+  change the dynamic context.  Closes `issue 901`_.
+
+- ``coverage run --debug=sys`` would fail with an AttributeError. This is now
+  fixed (`issue 907`_).
+
+.. _issue 884: https://github.com/nedbat/coveragepy/issues/884
+.. _issue 890: https://github.com/nedbat/coveragepy/issues/890
+.. _issue 891: https://github.com/nedbat/coveragepy/issues/891
+.. _issue 901: https://github.com/nedbat/coveragepy/issues/901
+.. _issue 907: https://github.com/nedbat/coveragepy/issues/907
+.. _issue 909: https://github.com/nedbat/coveragepy/issues/909
+
+
+.. _changes_501:
+
+Version 5.0.1 --- 2019-12-22
+----------------------------
+
+- If a 4.x data file is the cause of a "file is not a database" error, then use
+  a more specific error message, "Looks like a coverage 4.x data file, are you
+  mixing versions of coverage?"  Helps diagnose the problems described in
+  `issue 886`_.
+
+- Measurement contexts and relative file names didn't work together, as
+  reported in `issue 899`_ and `issue 900`_.  This is now fixed, thanks to
+  David Szotten.
+
+- When using ``coverage run --concurrency=multiprocessing``, all data files
+  should be named with parallel-ready suffixes.  5.0 mistakenly named the main
+  process' file with no suffix when using ``--append``.  This is now fixed,
+  closing `issue 880`_.
+
+- Fixed a problem on Windows when the current directory is changed to a
+  different drive (`issue 895`_).  Thanks, Olivier Grisel.
+
+- Updated Python 3.9 support to 3.9a2.
+
+.. _issue 880: https://github.com/nedbat/coveragepy/issues/880
+.. _issue 886: https://github.com/nedbat/coveragepy/issues/886
+.. _issue 895: https://github.com/nedbat/coveragepy/issues/895
+.. _issue 899: https://github.com/nedbat/coveragepy/issues/899
+.. _issue 900: https://github.com/nedbat/coveragepy/issues/900
+
+
+.. _changes_50:
+
+Version 5.0 --- 2019-12-14
+--------------------------
+
+Nothing new beyond 5.0b2.
+
+
+.. _changes_50b2:
+
+Version 5.0b2 --- 2019-12-08
+----------------------------
+
+- An experimental ``[run] relative_files`` setting tells coverage to store
+  relative file names in the data file. This makes it easier to run tests in
+  one (or many) environments, and then report in another.  It has not had much
+  real-world testing, so it may change in incompatible ways in the future.
+
+- When constructing a :class:`coverage.Coverage` object, `data_file` can be
+  specified as None to prevent writing any data file at all.  In previous
+  versions, an explicit `data_file=None` argument would use the default of
+  ".coverage". Fixes `issue 871`_.
+
+- Python files run with ``-m`` now have ``__spec__`` defined properly.  This
+  fixes `issue 745`_ (about not being able to run unittest tests that spawn
+  subprocesses), and `issue 838`_, which described the problem directly.
+
+- The ``[paths]`` configuration section is now ordered. If you specify more
+  than one list of patterns, the first one that matches will be used.  Fixes
+  `issue 649`_.
+
+- The :func:`.coverage.numbits.register_sqlite_functions` function now also
+  registers `numbits_to_nums` for use in SQLite queries.  Thanks, Simon
+  Willison.
+
+- Python 3.9a1 is supported.
+
+- Coverage.py has a mascot: :ref:`Sleepy Snake <sleepy>`.
+
+.. _issue 649: https://github.com/nedbat/coveragepy/issues/649
+.. _issue 745: https://github.com/nedbat/coveragepy/issues/745
+.. _issue 838: https://github.com/nedbat/coveragepy/issues/838
+.. _issue 871: https://github.com/nedbat/coveragepy/issues/871
+
+
+.. _changes_50b1:
+
+Version 5.0b1 --- 2019-11-11
+----------------------------
+
+- The HTML and textual reports now have a ``--skip-empty`` option that skips
+  files with no statements, notably ``__init__.py`` files.  Thanks, Reya B.
+
+- Configuration can now be read from `TOML`_ files.  This requires installing
+  coverage.py with the ``[toml]`` extra.  The standard "pyproject.toml" file
+  will be read automatically if no other configuration file is found, with
+  settings in the ``[tool.coverage.]`` namespace.  Thanks to Frazer McLean for
+  implementation and persistence.  Finishes `issue 664`_.
+
+- The ``[run] note`` setting has been deprecated. Using it will result in a
+  warning, and the note will not be written to the data file.  The
+  corresponding :class:`.CoverageData` methods have been removed.
+
+- The HTML report has been reimplemented (no more table around the source
+  code). This allowed for a better presentation of the context information,
+  hopefully resolving `issue 855`_.
+
+- Added sqlite3 module version information to ``coverage debug sys`` output.
+
+- Asking the HTML report to show contexts (``[html] show_contexts=True`` or
+  ``coverage html --show-contexts``) will issue a warning if there were no
+  contexts measured (`issue 851`_).
+
+.. _TOML: https://github.com/toml-lang/toml#readme
+.. _issue 664: https://github.com/nedbat/coveragepy/issues/664
+.. _issue 851: https://github.com/nedbat/coveragepy/issues/851
+.. _issue 855: https://github.com/nedbat/coveragepy/issues/855
+
+
+.. _changes_50a8:
+
+Version 5.0a8 --- 2019-10-02
+----------------------------
+
+- The :class:`.CoverageData` API has changed how queries are limited to
+  specific contexts.  Now you use :meth:`.CoverageData.set_query_context` to
+  set a single exact-match string, or :meth:`.CoverageData.set_query_contexts`
+  to set a list of regular expressions to match contexts.  This changes the
+  command-line ``--contexts`` option to use regular expressions instead of
+  filename-style wildcards.
+
+
+.. _changes_50a7:
+
+Version 5.0a7 --- 2019-09-21
+----------------------------
+
+- Data can now be "reported" in JSON format, for programmatic use, as requested
+  in `issue 720`_.  The new ``coverage json`` command writes raw and summarized
+  data to a JSON file.  Thanks, Matt Bachmann.
+
+- Dynamic contexts are now supported in the Python tracer, which is important
+  for PyPy users.  Closes `issue 846`_.
+
+- The compact line number representation introduced in 5.0a6 is called a
+  "numbits."  The :mod:`coverage.numbits` module provides functions for working
+  with them.
+
+- The reporting methods used to permanently apply their arguments to the
+  configuration of the Coverage object.  Now they no longer do.  The arguments
+  affect the operation of the method, but do not persist.
+
+- A class named "test_something" no longer confuses the ``test_function``
+  dynamic context setting.  Fixes `issue 829`_.
+
+- Fixed an unusual tokenizing issue with backslashes in comments.  Fixes
+  `issue 822`_.
+
+- ``debug=plugin`` didn't properly support configuration or dynamic context
+  plugins, but now it does, closing `issue 834`_.
+
+.. _issue 720: https://github.com/nedbat/coveragepy/issues/720
+.. _issue 822: https://github.com/nedbat/coveragepy/issues/822
+.. _issue 834: https://github.com/nedbat/coveragepy/issues/834
+.. _issue 829: https://github.com/nedbat/coveragepy/issues/829
+.. _issue 846: https://github.com/nedbat/coveragepy/issues/846
+
+
+.. _changes_50a6:
+
+Version 5.0a6 --- 2019-07-16
+----------------------------
+
+- Reporting on contexts. Big thanks to Stephan Richter and Albertas Agejevas
+  for the contribution.
+
+  - The ``--contexts`` option is available on the ``report`` and ``html``
+    commands.  It's a comma-separated list of shell-style wildcards, selecting
+    the contexts to report on.  Only contexts matching one of the wildcards
+    will be included in the report.
+
+  - The ``--show-contexts`` option for the ``html`` command adds context
+    information to each covered line.  Hovering over the "ctx" marker at the
+    end of the line reveals a list of the contexts that covered the line.
+
+- Database changes:
+
+  - Line numbers are now stored in a much more compact way.  For each file and
+    context, a single binary string is stored with a bit per line number.  This
+    greatly improves memory use, but makes ad-hoc use difficult.
+
+  - Dynamic contexts with no data are no longer written to the database.
+
+  - SQLite data storage is now faster.  There's no longer a reason to keep the
+    JSON data file code, so it has been removed.
+
+- Changes to the :class:`.CoverageData` interface:
+
+  - The new :meth:`.CoverageData.dumps` method serializes the data to a string,
+    and a corresponding :meth:`.CoverageData.loads` method reconstitutes this
+    data.  The format of the data string is subject to change at any time, and
+    so should only be used between two installations of the same version of
+    coverage.py.
+
+  - The :meth:`CoverageData constructor<.CoverageData.__init__>` has a new
+    argument, `no_disk` (default: False).  Setting it to True prevents writing
+    any data to the disk.  This is useful for transient data objects.
+
+- Added the classmethod :meth:`.Coverage.current` to get the latest started
+  Coverage instance.
+
+- Multiprocessing support in Python 3.8 was broken, but is now fixed.  Closes
+  `issue 828`_.
+
+- Error handling during reporting has changed slightly.  All reporting methods
+  now behave the same.  The ``--ignore-errors`` option keeps errors from
+  stopping the reporting, but files that couldn't parse as Python will always
+  be reported as warnings.  As with other warnings, you can suppress them with
+  the ``[run] disable_warnings`` configuration setting.
+
+- Coverage.py no longer fails if the user program deletes its current
+  directory. Fixes `issue 806`_.  Thanks, Dan Hemberger.
+
+- The scrollbar markers in the HTML report now accurately show the highlighted
+  lines, regardless of what categories of line are highlighted.
+
+- The hack to accommodate ShiningPanda_ looking for an obsolete internal data
+  file has been removed, since ShiningPanda 0.22 fixed it four years ago.
+
+- The deprecated `Reporter.file_reporters` property has been removed.
+
+.. _ShiningPanda: https://wiki.jenkins.io/display/JENKINS/ShiningPanda+Plugin
+.. _issue 806: https://github.com/nedbat/coveragepy/pull/806
+.. _issue 828: https://github.com/nedbat/coveragepy/issues/828
+
+
+.. _changes_50a5:
+
+Version 5.0a5 --- 2019-05-07
+----------------------------
+
+- Drop support for Python 3.4
+
+- Dynamic contexts can now be set two new ways, both thanks to Justas
+  Sadzevičius.
+
+  - A plugin can implement a ``dynamic_context`` method to check frames for
+    whether a new context should be started.  See
+    :ref:`dynamic_context_plugins` for more details.
+
+  - Another tool (such as a test runner) can use the new
+    :meth:`.Coverage.switch_context` method to explicitly change the context.
+
+- The ``dynamic_context = test_function`` setting now works with Python 2
+  old-style classes, though it only reports the method name, not the class it
+  was defined on.  Closes `issue 797`_.
+
+- ``fail_under`` values more than 100 are reported as errors.  Thanks to Mike
+  Fiedler for closing `issue 746`_.
+
+- The "missing" values in the text output are now sorted by line number, so
+  that missing branches are reported near the other lines they affect. The
+  values used to show all missing lines, and then all missing branches.
+
+- Access to the SQLite database used for data storage is now thread-safe.
+  Thanks, Stephan Richter. This closes `issue 702`_.
+
+- Combining data stored in SQLite is now about twice as fast, fixing `issue
+  761`_.  Thanks, Stephan Richter.
+
+- The ``filename`` attribute on :class:`.CoverageData` objects has been made
+  private.  You can use the ``data_filename`` method to get the actual file
+  name being used to store data, and the ``base_filename`` method to get the
+  original filename before parallelizing suffixes were added.  This is part of
+  fixing `issue 708`_.
+
+- Line numbers in the HTML report now align properly with source lines, even
+  when Chrome's minimum font size is set, fixing `issue 748`_.  Thanks Wen Ye.
+
+.. _issue 702: https://github.com/nedbat/coveragepy/issues/702
+.. _issue 708: https://github.com/nedbat/coveragepy/issues/708
+.. _issue 746: https://github.com/nedbat/coveragepy/issues/746
+.. _issue 748: https://github.com/nedbat/coveragepy/issues/748
+.. _issue 761: https://github.com/nedbat/coveragepy/issues/761
+.. _issue 797: https://github.com/nedbat/coveragepy/issues/797
+
+
+.. _changes_50a4:
+
+Version 5.0a4 --- 2018-11-25
+----------------------------
+
+- You can specify the command line to run your program with the ``[run]
+  command_line`` configuration setting, as requested in `issue 695`_.
+
+- Coverage will create directories as needed for the data file if they don't
+  exist, closing `issue 721`_.
+
+- The ``coverage run`` command has always adjusted the first entry in sys.path,
+  to properly emulate how Python runs your program.  Now this adjustment is
+  skipped if sys.path[0] is already different than Python's default.  This
+  fixes `issue 715`_.
+
+- Improvements to context support:
+
+  - The "no such table: meta" error is fixed: `issue 716`_.
+
+  - Combining data files is now much faster.
+
+- Python 3.8 (as of today!) passes all tests.
+
+.. _issue 695: https://github.com/nedbat/coveragepy/issues/695
+.. _issue 715: https://github.com/nedbat/coveragepy/issues/715
+.. _issue 716: https://github.com/nedbat/coveragepy/issues/716
+.. _issue 721: https://github.com/nedbat/coveragepy/issues/721
+
+
+.. _changes_50a3:
+
+Version 5.0a3 --- 2018-10-06
+----------------------------
+
+- Context support: static contexts let you specify a label for a coverage run,
+  which is recorded in the data, and retained when you combine files.  See
+  :ref:`contexts` for more information.
+
+- Dynamic contexts: specifying ``[run] dynamic_context = test_function`` in the
+  config file will record the test function name as a dynamic context during
+  execution.  This is the core of "Who Tests What" (`issue 170`_).  Things to
+  note:
+
+  - There is no reporting support yet.  Use SQLite to query the .coverage file
+    for information.  Ideas are welcome about how reporting could be extended
+    to use this data.
+
+  - There's a noticeable slow-down before any test is run.
+
+  - Data files will now be roughly N times larger, where N is the number of
+    tests you have.  Combining data files is therefore also N times slower.
+
+  - No other values for ``dynamic_context`` are recognized yet.  Let me know
+    what else would be useful.  I'd like to use a pytest plugin to get better
+    information directly from pytest, for example.
+
+.. _issue 170: https://github.com/nedbat/coveragepy/issues/170
+
+- Environment variable substitution in configuration files now supports two
+  syntaxes for controlling the behavior of undefined variables: if ``VARNAME``
+  is not defined, ``${VARNAME?}`` will raise an error, and ``${VARNAME-default
+  value}`` will use "default value".
+
+- Partial support for Python 3.8, which has not yet released an alpha. Fixes
+  `issue 707`_ and `issue 714`_.
+
+.. _issue 707: https://github.com/nedbat/coveragepy/issues/707
+.. _issue 714: https://github.com/nedbat/coveragepy/issues/714
+
+
+.. _changes_50a2:
+
+Version 5.0a2 --- 2018-09-03
+----------------------------
+
+- Coverage's data storage has changed.  In version 4.x, .coverage files were
+  basically JSON.  Now, they are SQLite databases.  This means the data file
+  can be created earlier than it used to.  A large amount of code was
+  refactored to support this change.
+
+  - Because the data file is created differently than previous releases, you
+    may need ``parallel=true`` where you didn't before.
+
+  - The old data format is still available (for now) by setting the environment
+    variable COVERAGE_STORAGE=json. Please tell me if you think you need to
+    keep the JSON format.
+
+  - The database schema is guaranteed to change in the future, to support new
+    features.  I'm looking for opinions about making the schema part of the
+    public API to coverage.py or not.
+
+- Development moved from `Bitbucket`_ to `GitHub`_.
+
+- HTML files no longer have trailing and extra whitespace.
+
+- The sort order in the HTML report is stored in local storage rather than
+  cookies, closing `issue 611`_.  Thanks, Federico Bond.
+
+- pickle2json, for converting v3 data files to v4 data files, has been removed.
+
+.. _Bitbucket: https://bitbucket.org/ned/coveragepy
+.. _GitHub: https://github.com/nedbat/coveragepy
+
+.. _issue 611: https://github.com/nedbat/coveragepy/issues/611
+
+
+.. _changes_50a1:
+
+Version 5.0a1 --- 2018-06-05
+----------------------------
+
+- Coverage.py no longer supports Python 2.6 or 3.3.
+
+- The location of the configuration file can now be specified with a
+  ``COVERAGE_RCFILE`` environment variable, as requested in `issue 650`_.
+
+- Namespace packages are supported on Python 3.7, where they used to cause
+  TypeErrors about path being None. Fixes `issue 700`_.
+
+- A new warning (``already-imported``) is issued if measurable files have
+  already been imported before coverage.py started measurement.  See
+  :ref:`cmd_warnings` for more information.
+
+- Running coverage many times for small runs in a single process should be
+  faster, closing `issue 625`_.  Thanks, David MacIver.
+
+- Large HTML report pages load faster.  Thanks, Pankaj Pandey.
+
+.. _issue 625: https://bitbucket.org/ned/coveragepy/issues/625/lstat-dominates-in-the-case-of-small
+.. _issue 650: https://bitbucket.org/ned/coveragepy/issues/650/allow-setting-configuration-file-location
+.. _issue 700: https://github.com/nedbat/coveragepy/issues/700
+
+
+.. _changes_454:
+
+Version 4.5.4 --- 2019-07-29
+----------------------------
+
+- Multiprocessing support in Python 3.8 was broken, but is now fixed.  Closes
+  `issue 828`_.
+
+.. _issue 828: https://github.com/nedbat/coveragepy/issues/828
+
+
+.. _changes_453:
+
+Version 4.5.3 --- 2019-03-09
+----------------------------
+
+- Only packaging metadata changes.
+
+
 .. _changes_452:
 
 Version 4.5.2 --- 2018-11-12
@@ -24,7 +523,7 @@
 - Namespace packages are supported on Python 3.7, where they used to cause
   TypeErrors about path being None. Fixes `issue 700`_.
 
-- Python 3.8 (as of today!) passes all tests.  Fixes `issue 707` and
+- Python 3.8 (as of today!) passes all tests.  Fixes `issue 707`_ and
   `issue 714`_.
 
 - Development moved from `Bitbucket`_ to `GitHub`_.
@@ -62,7 +561,7 @@
 Version 4.5 --- 2018-02-03
 --------------------------
 
-- A new kind of plugin is supported: configurators are invoked at start-up to
+- A new kind of plugin is supported: configurers are invoked at start-up to
   allow more complex configuration than the .coveragerc file can easily do.
   See :ref:`api_plugin` for details.  This solves the complex configuration
   problem described in `issue 563`_.
@@ -279,7 +778,7 @@
 
 .. _issue 322: https://bitbucket.org/ned/coveragepy/issues/322/cannot-use-coverage-with-jython
 .. _issue 426: https://bitbucket.org/ned/coveragepy/issues/426/difference-between-coverage-results-with
-.. _issue 522: https://bitbucket.org/ned/coveragepy/issues/522/incorrect-branch-reporting-with-__debug__
+.. _issue 522: https://bitbucket.org/ned/coveragepy/issues/522/incorrect-branch-reporting
 .. _issue 549: https://bitbucket.org/ned/coveragepy/issues/549/skip-covered-with-100-coverage-throws-a-no
 .. _issue 551: https://bitbucket.org/ned/coveragepy/issues/551/coveragepy-cannot-be-imported-in-jython27
 
@@ -506,7 +1005,7 @@
 .. _issue 478: https://bitbucket.org/ned/coveragepy/issues/478/help-shows-silly-program-name-when-running
 .. _issue 484: https://bitbucket.org/ned/coveragepy/issues/484/multiprocessing-greenlet-concurrency
 .. _issue 492: https://bitbucket.org/ned/coveragepy/issues/492/subprocess-coverage-strange-detection-of
-.. _unittest-mixins: https://pypi.python.org/pypi/unittest-mixins
+.. _unittest-mixins: https://pypi.org/project/unittest-mixins/
 
 
 .. _changes_41:
@@ -593,9 +1092,9 @@
   - Class docstrings were considered executable.  Now they no longer are.
 
   - ``yield from`` and ``await`` were considered returns from functions, since
-    they could tranfer control to the caller.  This produced unhelpful "missing
-    branch" reports in a number of circumstances.  Now they no longer are
-    considered returns.
+    they could transfer control to the caller.  This produced unhelpful
+    "missing branch" reports in a number of circumstances.  Now they no longer
+    are considered returns.
 
   - In unusual situations, a missing branch to a negative number was reported.
     This has been fixed, closing `issue 466`_.
@@ -676,7 +1175,7 @@
 
 - The XML report now has correct ``<source>`` elements when using a
   ``--source=`` option somewhere besides the current directory.  This fixes
-  `issue 439`_. Thanks, Arcady Ivanov.
+  `issue 439`_. Thanks, Arcadiy Ivanov.
 
 - Fixed an unusual edge case of detecting source encodings, described in
   `issue 443`_.
@@ -775,7 +1274,7 @@
 - Reporting on an unmeasured file would fail with a traceback.  This is now
   fixed, closing `issue 403`_.
 
-- The Jenkins ShiningPanda plugin looks for an obsolete file name to find the
+- The Jenkins ShiningPanda_ plugin looks for an obsolete file name to find the
   HTML reports to publish, so it was failing under coverage.py 4.0.  Now we
   create that file if we are running under Jenkins, to keep things working
   smoothly. `issue 404`_.
@@ -1825,7 +2324,7 @@
 
 - On Python 3.x, setuptools has been replaced by `Distribute`_.
 
-.. _Distribute: https://pypi.python.org/pypi/distribute
+.. _Distribute: https://pypi.org/project/distribute/
 
 
 Version 3.2b3 --- 2009-11-23
@@ -2038,7 +2537,7 @@
 
 - The singleton coverage object is only created if the module-level functions
   are used.  This maintains the old interface while allowing better
-  programmatic use of Coverage.py.
+  programmatic use of coverage.py.
 
 - The minimum supported Python version is 2.3.
 
--- a/eric6/DebugClients/Python/coverage/doc/CONTRIBUTORS.txt	Wed Feb 19 19:38:36 2020 +0100
+++ b/eric6/DebugClients/Python/coverage/doc/CONTRIBUTORS.txt	Sat Feb 22 14:27:42 2020 +0100
@@ -5,10 +5,14 @@
 useful bug reports, have been made by:
 
 Adi Roiban
+Agbonze O. Jeremiah
+Albertas Agejevas
+Aleksi Torhamo
 Alex Gaynor
 Alex Groce
 Alex Sandro
 Alexander Todorov
+Alexander Walters
 Andrew Hoos
 Anthony Sottile
 Arcadiy Ivanov
@@ -31,6 +35,7 @@
 Christoph Zwerschke
 Conrad Ho
 Cosimo Lupo
+Dan Hemberger
 Dan Riti
 Dan Wandschneider
 Danek Duvall
@@ -39,30 +44,40 @@
 David Christian
 David MacIver
 David Stanek
+David Szotten
 Detlev Offenbach
 Devin Jeanpierre
 Dirk Thomas
 Dmitry Shishov
 Dmitry Trofimov
 Eduardo Schettino
+Eli Skeggs
 Emil Madsen
 Edward Loper
+Federico Bond
+Frazer McLean
 Geoff Bache
 George Paci
 George Song
+George-Cristian Bîrzan
 Greg Rogers
+Guido van Rossum
 Guillaume Chazarain
+Hugo van Kemenade
 Ilia Meerovich
 Imri Goldberg
 Ionel Cristian Mărieș
 JT Olds
 Jessamyn Smith
 Joe Doherty
+Joe Jevnik
 Jon Chappell
 Jon Dufresne
 Joseph Tate
 Josh Williams
 Julian Berman
+Julien Voisin
+Justas Sadzevičius
 Krystian Kichewko
 Kyle Altendorf
 Lars Hupfeldt Nielsen
@@ -71,20 +86,27 @@
 Loïc Dachary
 Marc Abramowitz
 Marcus Cobden
+Marius Gedminas
 Mark van der Wal
 Martin Fuzzey
+Matt Bachmann
 Matthew Boehm
 Matthew Desmarais
 Max Linke
+Michał Bultrowicz
 Mickie Betz
+Mike Fiedler
 Nathan Land
 Noel O'Boyle
 Olivier Grisel
+Ori Avtalion
+Pankaj Pandey
 Pablo Carballo
 Patrick Mezard
 Peter Baughman
 Peter Ebden
 Peter Portante
+Reya B
 Rodrigue Cloutier
 Roger Hu
 Ross Lawley
@@ -92,12 +114,16 @@
 Sandra Martocchia
 Scott Belden
 Sigve Tjora
+Simon Willison
 Stan Hu
 Stefan Behnel
+Stephan Richter
 Stephen Finucane
 Steve Leonard
 Steve Peak
+S. Y. Lee
 Ted Wexler
+Thijs Triemstra
 Titus Brown
 Ville Skyttä
 Yury Selivanov
--- a/eric6/DebugClients/Python/coverage/doc/README.rst	Wed Feb 19 19:38:36 2020 +0100
+++ b/eric6/DebugClients/Python/coverage/doc/README.rst	Sat Feb 22 14:27:42 2020 +0100
@@ -1,5 +1,5 @@
 .. Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-.. For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+.. For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
 
 ===========
 Coverage.py
@@ -7,61 +7,51 @@
 
 Code coverage testing for Python.
 
-|  |license| |versions| |status| |docs|
-|  |ci-status| |win-ci-status| |codecov|
+|  |license| |versions| |status|
+|  |ci-status| |win-ci-status| |docs| |codecov|
 |  |kit| |format| |repos|
-|  |tidelift| |saythanks|
-
-.. downloads badge seems to be broken... |downloads|
+|  |stars| |forks| |contributors|
+|  |tidelift| |twitter-coveragepy| |twitter-nedbat|
 
 Coverage.py measures code coverage, typically during test execution. It uses
 the code analysis tools and tracing hooks provided in the Python standard
 library to determine which lines are executable, and which have been executed.
 
-.. |tideliftlogo| image:: doc/media/Tidelift_Logos_RGB_Tidelift_Shorthand_On-White_small.png
+Coverage.py runs on many versions of Python:
+
+* CPython 2.7.
+* CPython 3.5 through 3.9 alpha 2.
+* PyPy2 7.0 and PyPy3 7.0.
+
+Documentation is on `Read the Docs`_.  Code repository and issue tracker are on
+`GitHub`_.
+
+.. _Read the Docs: https://coverage.readthedocs.io/
+.. _GitHub: https://github.com/nedbat/coveragepy
+
+
+**New in 5.0:** SQLite data storage, JSON report, contexts, relative filenames,
+dropped support for Python 2.6, 3.3 and 3.4.
+
+
+For Enterprise
+--------------
+
+.. |tideliftlogo| image:: https://nedbatchelder.com/pix/Tidelift_Logo_small.png
    :width: 75
    :alt: Tidelift
+   :target: https://tidelift.com/subscription/pkg/pypi-coverage?utm_source=pypi-coverage&utm_medium=referral&utm_campaign=readme
 
 .. list-table::
    :widths: 10 100
 
    * - |tideliftlogo|
-     - Professional support for coverage.py is available as part of the `Tidelift
-       Subscription`_.  Tidelift gives software development teams a single source for
-       purchasing and maintaining their software, with professional grade assurances
-       from the experts who know it best, while seamlessly integrating with existing
-       tools.
-
-.. _Tidelift Subscription: https://tidelift.com/subscription/pkg/pypi-coverage?utm_source=pypi-coverage&utm_medium=referral&utm_campaign=readme
-
-Coverage.py runs on many versions of Python:
-
-* CPython 2.6, 2.7 and 3.3 through pre-alpha 3.8.
-* PyPy2 5.10 and PyPy3 5.10.
-* Jython 2.7.1, though not for reporting.
-* IronPython 2.7.7, though not for reporting.
-
-Documentation is on `Read the Docs`_.  Code repository and issue tracker are on
-`Bitbucket`_, with a mirrored repository on `GitHub`_.
-
-.. _Read the Docs: https://coverage.readthedocs.io/
-.. _Bitbucket: https://bitbucket.org/ned/coveragepy
-.. _GitHub: https://github.com/nedbat/coveragepy
-
-
-**New in 4.5:** Configurator plug-ins.
-
-New in 4.4: Suppressable warnings, continuous coverage measurement.
-
-New in 4.3: HTML ``--skip-covered``, sys.excepthook support, tox.ini
-support.
-
-New in 4.2: better support for multiprocessing and combining data.
-
-New in 4.1: much-improved branch coverage.
-
-New in 4.0: ``--concurrency``, plugins for non-Python files, setup.cfg
-support, --skip-covered, HTML filtering, and more than 50 issues closed.
+     - `Available as part of the Tidelift Subscription. <https://tidelift.com/subscription/pkg/pypi-coverage?utm_source=pypi-coverage&utm_medium=referral&utm_campaign=readme>`_
+       Coverage and thousands of other packages are working with
+       Tidelift to deliver one enterprise subscription that covers all of the open
+       source you use.  If you want the flexibility of open source and the confidence
+       of commercial-grade software, this is for you.
+       `Learn more. <https://tidelift.com/subscription/pkg/pypi-coverage?utm_source=pypi-coverage&utm_medium=referral&utm_campaign=readme>`_
 
 
 Getting Started
@@ -72,6 +62,14 @@
 .. _Quick Start section: https://coverage.readthedocs.io/#quick-start
 
 
+Change history
+--------------
+
+The complete history of changes is on the `change history page`_.
+
+.. _change history page: https://coverage.readthedocs.io/en/latest/changes.html
+
+
 Contributing
 ------------
 
@@ -80,17 +78,26 @@
 .. _Contributing section: https://coverage.readthedocs.io/en/latest/contributing.html
 
 
+Security
+--------
+
+To report a security vulnerability, please use the `Tidelift security
+contact`_.  Tidelift will coordinate the fix and disclosure.
+
+.. _Tidelift security contact: https://tidelift.com/security
+
+
 License
 -------
 
 Licensed under the `Apache 2.0 License`_.  For details, see `NOTICE.txt`_.
 
 .. _Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0
-.. _NOTICE.txt: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+.. _NOTICE.txt: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
 
 
-.. |ci-status| image:: https://travis-ci.org/nedbat/coveragepy.svg?branch=master
-    :target: https://travis-ci.org/nedbat/coveragepy
+.. |ci-status| image:: https://travis-ci.com/nedbat/coveragepy.svg?branch=master
+    :target: https://travis-ci.com/nedbat/coveragepy
     :alt: Build status
 .. |win-ci-status| image:: https://ci.appveyor.com/api/projects/status/kmeqpdje7h9r6vsf/branch/master?svg=true
     :target: https://ci.appveyor.com/project/nedbat/coveragepy
@@ -102,32 +109,44 @@
     :target: https://requires.io/github/nedbat/coveragepy/requirements/?branch=master
     :alt: Requirements status
 .. |kit| image:: https://badge.fury.io/py/coverage.svg
-    :target: https://pypi.python.org/pypi/coverage
+    :target: https://pypi.org/project/coverage/
     :alt: PyPI status
 .. |format| image:: https://img.shields.io/pypi/format/coverage.svg
-    :target: https://pypi.python.org/pypi/coverage
+    :target: https://pypi.org/project/coverage/
     :alt: Kit format
 .. |downloads| image:: https://img.shields.io/pypi/dw/coverage.svg
-    :target: https://pypi.python.org/pypi/coverage
+    :target: https://pypi.org/project/coverage/
     :alt: Weekly PyPI downloads
-.. |versions| image:: https://img.shields.io/pypi/pyversions/coverage.svg
-    :target: https://pypi.python.org/pypi/coverage
+.. |versions| image:: https://img.shields.io/pypi/pyversions/coverage.svg?logo=python&logoColor=FBE072
+    :target: https://pypi.org/project/coverage/
     :alt: Python versions supported
 .. |status| image:: https://img.shields.io/pypi/status/coverage.svg
-    :target: https://pypi.python.org/pypi/coverage
+    :target: https://pypi.org/project/coverage/
     :alt: Package stability
 .. |license| image:: https://img.shields.io/pypi/l/coverage.svg
-    :target: https://pypi.python.org/pypi/coverage
+    :target: https://pypi.org/project/coverage/
     :alt: License
-.. |codecov| image:: http://codecov.io/github/nedbat/coveragepy/coverage.svg?branch=master&precision=2
-    :target: http://codecov.io/github/nedbat/coveragepy?branch=master
+.. |codecov| image:: https://codecov.io/github/nedbat/coveragepy/coverage.svg?branch=master&precision=2
+    :target: https://codecov.io/github/nedbat/coveragepy?branch=master
     :alt: Coverage!
 .. |repos| image:: https://repology.org/badge/tiny-repos/python:coverage.svg
     :target: https://repology.org/metapackage/python:coverage/versions
     :alt: Packaging status
-.. |saythanks| image:: https://img.shields.io/badge/saythanks.io-%E2%98%BC-1EAEDB.svg
-    :target: https://saythanks.io/to/nedbat
-    :alt: Say thanks :)
-.. |tidelift| image:: https://tidelift.com/badges/github/nedbat/coveragepy
+.. |tidelift| image:: https://tidelift.com/badges/package/pypi/coverage
     :target: https://tidelift.com/subscription/pkg/pypi-coverage?utm_source=pypi-coverage&utm_medium=referral&utm_campaign=readme
     :alt: Tidelift
+.. |stars| image:: https://img.shields.io/github/stars/nedbat/coveragepy.svg?logo=github
+    :target: https://github.com/nedbat/coveragepy/stargazers
+    :alt: Github stars
+.. |forks| image:: https://img.shields.io/github/forks/nedbat/coveragepy.svg?logo=github
+    :target: https://github.com/nedbat/coveragepy/network/members
+    :alt: Github forks
+.. |contributors| image:: https://img.shields.io/github/contributors/nedbat/coveragepy.svg?logo=github
+    :target: https://github.com/nedbat/coveragepy/graphs/contributors
+    :alt: Contributors
+.. |twitter-coveragepy| image:: https://img.shields.io/twitter/follow/coveragepy.svg?label=coveragepy&style=flat&logo=twitter&logoColor=4FADFF
+    :target: https://twitter.com/coveragepy
+    :alt: coverage.py on Twitter
+.. |twitter-nedbat| image:: https://img.shields.io/twitter/follow/nedbat.svg?label=nedbat&style=flat&logo=twitter&logoColor=4FADFF
+    :target: https://twitter.com/nedbat
+    :alt: nedbat on Twitter
--- a/eric6/DebugClients/Python/coverage/env.py	Wed Feb 19 19:38:36 2020 +0100
+++ b/eric6/DebugClients/Python/coverage/env.py	Sat Feb 22 14:27:42 2020 +0100
@@ -1,5 +1,5 @@
 # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
 
 """Determine facts about the environment."""
 
@@ -9,28 +9,65 @@
 
 # Operating systems.
 WINDOWS = sys.platform == "win32"
-LINUX = sys.platform == "linux2"
+LINUX = sys.platform.startswith("linux")
+
+# Python versions. We amend version_info with one more value, a zero if an
+# official version, or 1 if built from source beyond an official version.
+PYVERSION = sys.version_info + (int(platform.python_version()[-1] == "+"),)
+PY2 = PYVERSION < (3, 0)
+PY3 = PYVERSION >= (3, 0)
 
 # Python implementations.
 PYPY = (platform.python_implementation() == 'PyPy')
 if PYPY:
     PYPYVERSION = sys.pypy_version_info
 
+PYPY2 = PYPY and PY2
+PYPY3 = PYPY and PY3
+
 JYTHON = (platform.python_implementation() == 'Jython')
 IRONPYTHON = (platform.python_implementation() == 'IronPython')
 
-# Python versions.
-PYVERSION = sys.version_info
-PY2 = PYVERSION < (3, 0)
-PY3 = PYVERSION >= (3, 0)
-
 # Python behavior
 class PYBEHAVIOR(object):
     """Flags indicating this Python's behavior."""
 
+    # Is "if __debug__" optimized away?
+    optimize_if_debug = (not PYPY)
+
+    # Is "if not __debug__" optimized away?
+    optimize_if_not_debug = (not PYPY) and (PYVERSION >= (3, 7, 0, 'alpha', 4))
+
     # Is "if not __debug__" optimized away even better?
     optimize_if_not_debug2 = (not PYPY) and (PYVERSION >= (3, 8, 0, 'beta', 1))
 
+    # Do we have yield-from?
+    yield_from = (PYVERSION >= (3, 3))
+
+    # Do we have PEP 420 namespace packages?
+    namespaces_pep420 = (PYVERSION >= (3, 3))
+
+    # Do .pyc files have the source file size recorded in them?
+    size_in_pyc = (PYVERSION >= (3, 3))
+
+    # Do we have async and await syntax?
+    async_syntax = (PYVERSION >= (3, 5))
+
+    # PEP 448 defined additional unpacking generalizations
+    unpackings_pep448 = (PYVERSION >= (3, 5))
+
+    # Can co_lnotab have negative deltas?
+    negative_lnotab = (PYVERSION >= (3, 6))
+
+    # Do .pyc files conform to PEP 552? Hash-based pyc's.
+    hashed_pyc_pep552 = (PYVERSION >= (3, 7, 0, 'alpha', 4))
+
+    # Python 3.7.0b3 changed the behavior of the sys.path[0] entry for -m. It
+    # used to be an empty string (meaning the current directory). It changed
+    # to be the actual path to the current directory, so that os.chdir wouldn't
+    # affect the outcome.
+    actual_syspath0_dash_m = (PYVERSION >= (3, 7, 0, 'beta', 3))
+
     # When a break/continue/return statement in a try block jumps to a finally
     # block, does the finally block do the break/continue/return (pre-3.8), or
     # does the finally jump back to the break/continue/return (3.8) to do the
@@ -45,6 +82,13 @@
     # Are while-true loops optimized into absolute jumps with no loop setup?
     nix_while_true = (PYVERSION >= (3, 8))
 
+    # Python 3.9a1 made sys.argv[0] and other reported files absolute paths.
+    report_absolute_files = (PYVERSION >= (3, 9))
+
+    # Python 3.9a2 changed how return/finally was traced, but it was
+    # temporary.
+    bpo39114 = (PYVERSION == (3, 9, 0, 'alpha', 2, 0))
+
 # Coverage.py specifics.
 
 # Are we using the C-implemented trace function?
--- a/eric6/DebugClients/Python/coverage/execfile.py	Wed Feb 19 19:38:36 2020 +0100
+++ b/eric6/DebugClients/Python/coverage/execfile.py	Sat Feb 22 14:27:42 2020 +0100
@@ -1,16 +1,19 @@
 # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
 
 """Execute files of Python code."""
 
+import inspect
 import marshal
 import os
 import struct
 import sys
 import types
 
+from coverage import env
 from coverage.backward import BUILTINS
 from coverage.backward import PYC_MAGIC_NUMBER, imp, importlib_util_find_spec
+from coverage.files import canonical_filename, python_reported_file
 from coverage.misc import CoverageException, ExceptionDuringRun, NoCode, NoSource, isolate_module
 from coverage.phystokens import compile_unicode
 from coverage.python import get_python_source
@@ -31,8 +34,8 @@
     def find_module(modulename):
         """Find the module named `modulename`.
 
-        Returns the file path of the module, and the name of the enclosing
-        package.
+        Returns the file path of the module, the name of the enclosing
+        package, and the spec.
         """
         try:
             spec = importlib_util_find_spec(modulename)
@@ -42,7 +45,7 @@
             raise NoSource("No module named %r" % (modulename,))
         pathname = spec.origin
         packagename = spec.name
-        if pathname.endswith("__init__.py") and not modulename.endswith("__init__"):
+        if spec.submodule_search_locations:
             mod_main = modulename + ".__main__"
             spec = importlib_util_find_spec(mod_main)
             if not spec:
@@ -54,13 +57,13 @@
             pathname = spec.origin
             packagename = spec.name
         packagename = packagename.rpartition(".")[0]
-        return pathname, packagename
+        return pathname, packagename, spec
 else:
     def find_module(modulename):
         """Find the module named `modulename`.
 
-        Returns the file path of the module, and the name of the enclosing
-        package.
+        Returns the file path of the module, the name of the enclosing
+        package, and None (where a spec would have been).
         """
         openfile = None
         glo, loc = globals(), locals()
@@ -96,101 +99,153 @@
             if openfile:
                 openfile.close()
 
-        return pathname, packagename
+        return pathname, packagename, None
 
 
-def run_python_module(modulename, args):
-    """Run a Python module, as though with ``python -m name args...``.
-
-    `modulename` is the name of the module, possibly a dot-separated name.
-    `args` is the argument array to present as sys.argv, including the first
-    element naming the module being executed.
-
-    """
-    pathname, packagename = find_module(modulename)
+class PyRunner(object):
+    """Multi-stage execution of Python code.
 
-    pathname = os.path.abspath(pathname)
-    args[0] = pathname
-    # Python 3.7.0b3 changed the behavior of the sys.path[0] entry for -m. It
-    # used to be an empty string (meaning the current directory). It changed
-    # to be the actual path to the current directory, so that os.chdir wouldn't
-    # affect the outcome.
-    if sys.version_info >= (3, 7, 0, 'beta', 3):
-        path0 = os.getcwd()
-    else:
-        path0 = ""
-    run_python_file(pathname, args, package=packagename, modulename=modulename, path0=path0)
-
-
-def run_python_file(filename, args, package=None, modulename=None, path0=None):
-    """Run a Python file as if it were the main program on the command line.
-
-    `filename` is the path to the file to execute, it need not be a .py file.
-    `args` is the argument array to present as sys.argv, including the first
-    element naming the file being executed.  `package` is the name of the
-    enclosing package, if any.
-
-    `modulename` is the name of the module the file was run as.
-
-    `path0` is the value to put into sys.path[0].  If it's None, then this
-    function will decide on a value.
+    This is meant to emulate real Python execution as closely as possible.
 
     """
-    if modulename is None and sys.version_info >= (3, 3):
-        modulename = '__main__'
+    def __init__(self, args, as_module=False):
+        self.args = args
+        self.as_module = as_module
+
+        self.arg0 = args[0]
+        self.package = self.modulename = self.pathname = self.loader = self.spec = None
+
+    def prepare(self):
+        """Set sys.path properly.
+
+        This needs to happen before any importing, and without importing anything.
+        """
+        if self.as_module:
+            if env.PYBEHAVIOR.actual_syspath0_dash_m:
+                path0 = os.getcwd()
+            else:
+                path0 = ""
+        elif os.path.isdir(self.arg0):
+            # Running a directory means running the __main__.py file in that
+            # directory.
+            path0 = self.arg0
+        else:
+            path0 = os.path.abspath(os.path.dirname(self.arg0))
 
-    # Create a module to serve as __main__
-    old_main_mod = sys.modules['__main__']
-    main_mod = types.ModuleType('__main__')
-    sys.modules['__main__'] = main_mod
-    main_mod.__file__ = filename
-    if package:
-        main_mod.__package__ = package
-    if modulename:
-        main_mod.__loader__ = DummyLoader(modulename)
+        if os.path.isdir(sys.path[0]):
+            # sys.path fakery.  If we are being run as a command, then sys.path[0]
+            # is the directory of the "coverage" script.  If this is so, replace
+            # sys.path[0] with the directory of the file we're running, or the
+            # current directory when running modules.  If it isn't so, then we
+            # don't know what's going on, and just leave it alone.
+            top_file = inspect.stack()[-1][0].f_code.co_filename
+            sys_path_0_abs = os.path.abspath(sys.path[0])
+            top_file_dir_abs = os.path.abspath(os.path.dirname(top_file))
+            sys_path_0_abs = canonical_filename(sys_path_0_abs)
+            top_file_dir_abs = canonical_filename(top_file_dir_abs)
+            if sys_path_0_abs != top_file_dir_abs:
+                path0 = None
 
-    main_mod.__builtins__ = BUILTINS
+        else:
+            # sys.path[0] is a file. Is the next entry the directory containing
+            # that file?
+            if sys.path[1] == os.path.dirname(sys.path[0]):
+                # Can it be right to always remove that?
+                del sys.path[1]
 
-    # Set sys.argv properly.
-    old_argv = sys.argv
-    sys.argv = args
+        if path0 is not None:
+            sys.path[0] = python_reported_file(path0)
+
+    def _prepare2(self):
+        """Do more preparation to run Python code.
+
+        Includes finding the module to run and adjusting sys.argv[0].
+        This method is allowed to import code.
 
-    if os.path.isdir(filename):
-        # Running a directory means running the __main__.py file in that
-        # directory.
-        my_path0 = filename
+        """
+        if self.as_module:
+            self.modulename = self.arg0
+            pathname, self.package, self.spec = find_module(self.modulename)
+            if self.spec is not None:
+                self.modulename = self.spec.name
+            self.loader = DummyLoader(self.modulename)
+            self.pathname = os.path.abspath(pathname)
+            self.args[0] = self.arg0 = self.pathname
+        elif os.path.isdir(self.arg0):
+            # Running a directory means running the __main__.py file in that
+            # directory.
+            for ext in [".py", ".pyc", ".pyo"]:
+                try_filename = os.path.join(self.arg0, "__main__" + ext)
+                if os.path.exists(try_filename):
+                    self.arg0 = try_filename
+                    break
+            else:
+                raise NoSource("Can't find '__main__' module in '%s'" % self.arg0)
+
+            if env.PY2:
+                self.arg0 = os.path.abspath(self.arg0)
 
-        for ext in [".py", ".pyc", ".pyo"]:
-            try_filename = os.path.join(filename, "__main__" + ext)
-            if os.path.exists(try_filename):
-                filename = try_filename
-                break
+            # Make a spec. I don't know if this is the right way to do it.
+            try:
+                import importlib.machinery
+            except ImportError:
+                pass
+            else:
+                try_filename = python_reported_file(try_filename)
+                self.spec = importlib.machinery.ModuleSpec("__main__", None, origin=try_filename)
+                self.spec.has_location = True
+            self.package = ""
+            self.loader = DummyLoader("__main__")
         else:
-            raise NoSource("Can't find '__main__' module in '%s'" % filename)
-    else:
-        my_path0 = os.path.abspath(os.path.dirname(filename))
+            if env.PY3:
+                self.loader = DummyLoader("__main__")
+
+        self.arg0 = python_reported_file(self.arg0)
+
+    def run(self):
+        """Run the Python code!"""
+
+        self._prepare2()
+
+        # Create a module to serve as __main__
+        main_mod = types.ModuleType('__main__')
 
-    # Set sys.path correctly.
-    old_path0 = sys.path[0]
-    sys.path[0] = path0 if path0 is not None else my_path0
+        from_pyc = self.arg0.endswith((".pyc", ".pyo"))
+        main_mod.__file__ = self.arg0
+        if from_pyc:
+            main_mod.__file__ = main_mod.__file__[:-1]
+        if self.package is not None:
+            main_mod.__package__ = self.package
+        main_mod.__loader__ = self.loader
+        if self.spec is not None:
+            main_mod.__spec__ = self.spec
 
-    try:
+        main_mod.__builtins__ = BUILTINS
+
+        sys.modules['__main__'] = main_mod
+
+        # Set sys.argv properly.
+        sys.argv = self.args
+
         try:
             # Make a code object somehow.
-            if filename.endswith((".pyc", ".pyo")):
-                code = make_code_from_pyc(filename)
+            if from_pyc:
+                code = make_code_from_pyc(self.arg0)
             else:
-                code = make_code_from_py(filename)
+                code = make_code_from_py(self.arg0)
         except CoverageException:
             raise
         except Exception as exc:
-            msg = "Couldn't run {filename!r} as Python code: {exc.__class__.__name__}: {exc}"
-            raise CoverageException(msg.format(filename=filename, exc=exc))
+            msg = "Couldn't run {filename!r} as Python code: {exc.__class__.__name__}: {exc}"
+            raise CoverageException(msg.format(filename=self.arg0, exc=exc))
 
         # Execute the code object.
+        # Return to the original directory in case the test code exits in
+        # a non-existent directory.
+        cwd = os.getcwd()
         try:
             exec(code, main_mod.__dict__)
-        except SystemExit:
+        except SystemExit:                          # pylint: disable=try-except-raise
             # The user called sys.exit().  Just pass it along to the upper
             # layers, where it will be handled.
             raise
@@ -213,7 +268,7 @@
                 if hasattr(err, "__traceback__"):
                     err.__traceback__ = err.__traceback__.tb_next
                 sys.excepthook(typ, err, tb.tb_next)
-            except SystemExit:
+            except SystemExit:                      # pylint: disable=try-except-raise
                 raise
             except Exception:
                 # Getting the output right in the case of excepthook
@@ -228,12 +283,37 @@
                 raise ExceptionDuringRun(typ, err, tb.tb_next)
             else:
                 sys.exit(1)
+        finally:
+            os.chdir(cwd)
 
-    finally:
-        # Restore the old __main__, argv, and path.
-        sys.modules['__main__'] = old_main_mod
-        sys.argv = old_argv
-        sys.path[0] = old_path0
+
+def run_python_module(args):
+    """Run a Python module, as though with ``python -m name args...``.
+
+    `args` is the argument array to present as sys.argv, including the first
+    element naming the module being executed.
+
+    This is a helper for tests, to encapsulate how to use PyRunner.
+
+    """
+    runner = PyRunner(args, as_module=True)
+    runner.prepare()
+    runner.run()
+
+
+def run_python_file(args):
+    """Run a Python file as if it were the main program on the command line.
+
+    `args` is the argument array to present as sys.argv, including the first
+    element naming the file being executed (``args[0]``).  The file is
+    run via ``PyRunner`` with ``as_module=False``.
+
+    This is a helper for tests, to encapsulate how to use PyRunner.
+
+    """
+    runner = PyRunner(args, as_module=False)
+    runner.prepare()
+    runner.run()
 
 
 def make_code_from_py(filename):
@@ -260,10 +340,10 @@
         # match or we won't run the file.
         magic = fpyc.read(4)
         if magic != PYC_MAGIC_NUMBER:
-            raise NoCode("Bad magic number in .pyc file")
+            raise NoCode("Bad magic number in .pyc file: {} != {}".format(magic, PYC_MAGIC_NUMBER))
 
         date_based = True
-        if sys.version_info >= (3, 7, 0, 'alpha', 4):
+        if env.PYBEHAVIOR.hashed_pyc_pep552:
             flags = struct.unpack('<L', fpyc.read(4))[0]
             hash_based = flags & 0x01
             if hash_based:
@@ -272,7 +352,7 @@
         if date_based:
             # Skip the junk in the header that we don't need.
             fpyc.read(4)            # Skip the moddate.
-            if sys.version_info >= (3, 3):
+            if env.PYBEHAVIOR.size_in_pyc:
                 # 3.3 added another long to the header (size), skip it.
                 fpyc.read(4)
 
--- a/eric6/DebugClients/Python/coverage/files.py	Wed Feb 19 19:38:36 2020 +0100
+++ b/eric6/DebugClients/Python/coverage/files.py	Sat Feb 22 14:27:42 2020 +0100
@@ -1,5 +1,5 @@
 # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
 
 """File wrangling."""
 
@@ -59,6 +59,7 @@
 
     """
     if filename not in CANONICAL_FILENAME_CACHE:
+        cf = filename
         if not os.path.isabs(filename):
             for path in [os.curdir] + sys.path:
                 if path is None:
@@ -69,9 +70,9 @@
                 except UnicodeError:
                     exists = False
                 if exists:
-                    filename = f
+                    cf = f
                     break
-        cf = abs_file(filename)
+        cf = abs_file(cf)
         CANONICAL_FILENAME_CACHE[filename] = cf
     return CANONICAL_FILENAME_CACHE[filename]
 
@@ -122,7 +123,9 @@
             else:
                 try:
                     files = os.listdir(head)
-                except OSError:
+                except Exception:
+                    # This will raise OSError, or this bizarre TypeError:
+                    # https://bugs.python.org/issue1776160
                     files = []
                 _ACTUAL_PATH_LIST_CACHE[head] = files
             normtail = os.path.normcase(tail)
@@ -156,9 +159,8 @@
 
 
 @contract(returns='unicode')
-def abs_file(filename):
-    """Return the absolute normalized form of `filename`."""
-    path = os.path.expandvars(os.path.expanduser(filename))
+def abs_file(path):
+    """Return the absolute normalized form of `path`."""
     try:
         path = os.path.realpath(path)
     except UnicodeError:
@@ -169,6 +171,13 @@
     return path
 
 
+def python_reported_file(filename):
+    """Return the string as Python would describe this file name."""
+    if env.PYBEHAVIOR.report_absolute_files:
+        filename = os.path.abspath(filename)
+    return filename
+
+
 RELATIVE_DIR = None
 CANONICAL_FILENAME_CACHE = None
 set_relative_directory()
@@ -335,7 +344,7 @@
     def pprint(self):       # pragma: debugging
         """Dump the important parts of the PathAliases, for debugging."""
         for regex, result in self.aliases:
-            print("{0!r} --> {1!r}".format(regex.pattern, result))
+            print("{!r} --> {!r}".format(regex.pattern, result))
 
     def add(self, pattern, result):
         """Add the `pattern`/`result` pair to the list of aliases.
--- a/eric6/DebugClients/Python/coverage/html.py	Wed Feb 19 19:38:36 2020 +0100
+++ b/eric6/DebugClients/Python/coverage/html.py	Sat Feb 22 14:27:42 2020 +0100
@@ -1,19 +1,21 @@
 # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
 
 """HTML reporting for coverage.py."""
 
 import datetime
 import json
 import os
+import re
 import shutil
 
 import coverage
 from coverage import env
-from coverage.backward import iitems
+from coverage.backward import iitems, SimpleNamespace
+from coverage.data import add_data_to_hash
 from coverage.files import flat_rootname
-from coverage.misc import CoverageException, file_be_gone, Hasher, isolate_module
-from coverage.report import Reporter
+from coverage.misc import CoverageException, ensure_dir, file_be_gone, Hasher, isolate_module
+from coverage.report import get_analysis_to_report
 from coverage.results import Numbers
 from coverage.templite import Templite
 
@@ -66,11 +68,90 @@
 
 def write_html(fname, html):
     """Write `html` to `fname`, properly encoded."""
+    html = re.sub(r"(\A\s+)|(\s+$)", "", html, flags=re.MULTILINE) + "\n"
     with open(fname, "wb") as fout:
         fout.write(html.encode('ascii', 'xmlcharrefreplace'))
 
 
-class HtmlReporter(Reporter):
+class HtmlDataGeneration(object):
+    """Generate structured data to be turned into HTML reports."""
+
+    EMPTY = "(empty)"
+
+    def __init__(self, cov):
+        self.coverage = cov
+        self.config = self.coverage.config
+        data = self.coverage.get_data()
+        self.has_arcs = data.has_arcs()
+        if self.config.show_contexts:
+            if data.measured_contexts() == set([""]):
+                self.coverage._warn("No contexts were measured")
+        data.set_query_contexts(self.config.report_contexts)
+
+    def data_for_file(self, fr, analysis):
+        """Produce the data needed for one file's report."""
+        if self.has_arcs:
+            missing_branch_arcs = analysis.missing_branch_arcs()
+            arcs_executed = analysis.arcs_executed()
+
+        if self.config.show_contexts:
+            contexts_by_lineno = analysis.data.contexts_by_lineno(analysis.filename)
+
+        lines = []
+
+        for lineno, tokens in enumerate(fr.source_token_lines(), start=1):
+            # Figure out how to mark this line.
+            category = None
+            short_annotations = []
+            long_annotations = []
+
+            if lineno in analysis.excluded:
+                category = 'exc'
+            elif lineno in analysis.missing:
+                category = 'mis'
+            elif self.has_arcs and lineno in missing_branch_arcs:
+                category = 'par'
+                for b in missing_branch_arcs[lineno]:
+                    if b < 0:
+                        short_annotations.append("exit")
+                    else:
+                        short_annotations.append(b)
+                    long_annotations.append(fr.missing_arc_description(lineno, b, arcs_executed))
+            elif lineno in analysis.statements:
+                category = 'run'
+
+            contexts = contexts_label = None
+            context_list = None
+            if category and self.config.show_contexts:
+                contexts = sorted(c or self.EMPTY for c in contexts_by_lineno[lineno])
+                if contexts == [self.EMPTY]:
+                    contexts_label = self.EMPTY
+                else:
+                    contexts_label = "{} ctx".format(len(contexts))
+                    context_list = contexts
+
+            lines.append(SimpleNamespace(
+                tokens=tokens,
+                number=lineno,
+                category=category,
+                statement=(lineno in analysis.statements),
+                contexts=contexts,
+                contexts_label=contexts_label,
+                context_list=context_list,
+                short_annotations=short_annotations,
+                long_annotations=long_annotations,
+            ))
+
+        file_data = SimpleNamespace(
+            relative_filename=fr.relative_filename(),
+            nums=analysis.numbers,
+            lines=lines,
+        )
+
+        return file_data
+
+
+class HtmlReporter(object):
     """HTML reporting."""
 
     # These files will be copied from the htmlfiles directory to the output
@@ -87,30 +168,54 @@
         ("keybd_open.png", ""),
     ]
 
-    def __init__(self, cov, config):
-        super(HtmlReporter, self).__init__(cov, config)
-        self.directory = None
+    def __init__(self, cov):
+        self.coverage = cov
+        self.config = self.coverage.config
+        self.directory = self.config.html_dir
         title = self.config.html_title
         if env.PY2:
             title = title.decode("utf8")
+
+        if self.config.extra_css:
+            self.extra_css = os.path.basename(self.config.extra_css)
+        else:
+            self.extra_css = None
+
+        self.data = self.coverage.get_data()
+        self.has_arcs = self.data.has_arcs()
+
+        self.file_summaries = []
+        self.all_files_nums = []
+        self.incr = IncrementalChecker(self.directory)
+        self.datagen = HtmlDataGeneration(self.coverage)
+        self.totals = Numbers()
+
         self.template_globals = {
+            # Functions available in the templates.
             'escape': escape,
             'pair': pair,
-            'title': title,
+            'len': len,
+
+            # Constants for this report.
             '__url__': coverage.__url__,
             '__version__': coverage.__version__,
-        }
-        self.source_tmpl = Templite(read_data("pyfile.html"), self.template_globals)
-
-        self.coverage = cov
+            'title': title,
+            'time_stamp': datetime.datetime.now().strftime('%Y-%m-%d %H:%M'),
+            'extra_css': self.extra_css,
+            'has_arcs': self.has_arcs,
+            'show_contexts': self.config.show_contexts,
 
-        self.files = []
-        self.all_files_nums = []
-        self.has_arcs = self.coverage.data.has_arcs()
-        self.status = HtmlStatus()
-        self.extra_css = None
-        self.totals = Numbers()
-        self.time_stamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M')
+            # Constants for all reports.
+            # These css classes determine which lines are highlighted by default.
+            'category': {
+                'exc': 'exc show_exc',
+                'mis': 'mis show_mis',
+                'par': 'par run show_par',
+                'run': 'run',
+            }
+        }
+        self.pyfile_html_source = read_data("pyfile.html")
+        self.source_tmpl = Templite(self.pyfile_html_source, self.template_globals)
 
     def report(self, morfs):
         """Generate an HTML report for `morfs`.
@@ -118,29 +223,20 @@
         `morfs` is a list of modules or file names.
 
         """
-        assert self.config.html_dir, "must give a directory for html reporting"
-
-        # Read the status data.
-        self.status.read(self.config.html_dir)
-
-        # Check that this run used the same settings as the last run.
-        m = Hasher()
-        m.update(self.config)
-        these_settings = m.hexdigest()
-        if self.status.settings_hash() != these_settings:
-            self.status.reset()
-            self.status.set_settings_hash(these_settings)
-
-        # The user may have extra CSS they want copied.
-        if self.config.extra_css:
-            self.extra_css = os.path.basename(self.config.extra_css)
+        # Read the status data and check that this run used the same
+        # global data as the last run.
+        self.incr.read()
+        self.incr.check_global_data(self.config, self.pyfile_html_source)
 
         # Process all the files.
-        self.report_files(self.html_file, morfs, self.config.html_dir)
+        for fr, analysis in get_analysis_to_report(self.coverage, morfs):
+            self.html_file(fr, analysis)
 
         if not self.all_files_nums:
             raise CoverageException("No data to report.")
 
+        self.totals = sum(self.all_files_nums)
+
         # Write the index file.
         self.index_file()
 
@@ -163,17 +259,11 @@
                 os.path.join(self.directory, self.extra_css)
             )
 
-    def file_hash(self, source, fr):
-        """Compute a hash that changes if the file needs to be re-reported."""
-        m = Hasher()
-        m.update(source)
-        self.coverage.data.add_to_hash(fr.filename, m)
-        return m.hexdigest()
-
     def html_file(self, fr, analysis):
         """Generate an HTML file for one source file."""
         rootname = flat_rootname(fr.relative_filename())
         html_filename = rootname + ".html"
+        ensure_dir(self.directory)
         html_path = os.path.join(self.directory, html_filename)
 
         # Get the numbers for this file.
@@ -189,100 +279,63 @@
                 file_be_gone(html_path)
                 return
 
-        source = fr.source()
+        if self.config.skip_empty:
+            # Don't report on empty files.
+            if nums.n_statements == 0:
+                file_be_gone(html_path)
+                return
 
         # Find out if the file on disk is already correct.
-        this_hash = self.file_hash(source.encode('utf-8'), fr)
-        that_hash = self.status.file_hash(rootname)
-        if this_hash == that_hash:
-            # Nothing has changed to require the file to be reported again.
-            self.files.append(self.status.index_info(rootname))
+        if self.incr.can_skip_file(self.data, fr, rootname):
+            self.file_summaries.append(self.incr.index_info(rootname))
             return
 
-        self.status.set_file_hash(rootname, this_hash)
-
-        if self.has_arcs:
-            missing_branch_arcs = analysis.missing_branch_arcs()
-            arcs_executed = analysis.arcs_executed()
-
-        # These classes determine which lines are highlighted by default.
-        c_run = "run hide_run"
-        c_exc = "exc"
-        c_mis = "mis"
-        c_par = "par " + c_run
-
-        lines = []
-
-        for lineno, line in enumerate(fr.source_token_lines(), start=1):
-            # Figure out how to mark this line.
-            line_class = []
-            annotate_html = ""
-            annotate_long = ""
-            if lineno in analysis.statements:
-                line_class.append("stm")
-            if lineno in analysis.excluded:
-                line_class.append(c_exc)
-            elif lineno in analysis.missing:
-                line_class.append(c_mis)
-            elif self.has_arcs and lineno in missing_branch_arcs:
-                line_class.append(c_par)
-                shorts = []
-                longs = []
-                for b in missing_branch_arcs[lineno]:
-                    if b < 0:
-                        shorts.append("exit")
-                    else:
-                        shorts.append(b)
-                    longs.append(fr.missing_arc_description(lineno, b, arcs_executed))
-                # 202F is NARROW NO-BREAK SPACE.
-                # 219B is RIGHTWARDS ARROW WITH STROKE.
-                short_fmt = "%s&#x202F;&#x219B;&#x202F;%s"
-                annotate_html = ",&nbsp;&nbsp; ".join(short_fmt % (lineno, d) for d in shorts)
-
-                if len(longs) == 1:
-                    annotate_long = longs[0]
-                else:
-                    annotate_long = "%d missed branches: %s" % (
-                        len(longs),
-                        ", ".join("%d) %s" % (num, ann_long)
-                            for num, ann_long in enumerate(longs, start=1)),
-                    )
-            elif lineno in analysis.statements:
-                line_class.append(c_run)
-
+        # Write the HTML page for this file.
+        file_data = self.datagen.data_for_file(fr, analysis)
+        for ldata in file_data.lines:
             # Build the HTML for the line.
             html = []
-            for tok_type, tok_text in line:
+            for tok_type, tok_text in ldata.tokens:
                 if tok_type == "ws":
                     html.append(escape(tok_text))
                 else:
                     tok_html = escape(tok_text) or '&nbsp;'
                     html.append(
-                        '<span class="%s">%s</span>' % (tok_type, tok_html)
+                        u'<span class="{}">{}</span>'.format(tok_type, tok_html)
                     )
+            ldata.html = ''.join(html)
 
-            lines.append({
-                'html': ''.join(html),
-                'number': lineno,
-                'class': ' '.join(line_class) or "pln",
-                'annotate': annotate_html,
-                'annotate_long': annotate_long,
-            })
+            if ldata.short_annotations:
+                # 202F is NARROW NO-BREAK SPACE.
+                # 219B is RIGHTWARDS ARROW WITH STROKE.
+                ldata.annotate = u",&nbsp;&nbsp; ".join(
+                    u"{}&#x202F;&#x219B;&#x202F;{}".format(ldata.number, d)
+                    for d in ldata.short_annotations
+                    )
+            else:
+                ldata.annotate = None
 
-        # Write the HTML page for this file.
-        html = self.source_tmpl.render({
-            'c_exc': c_exc,
-            'c_mis': c_mis,
-            'c_par': c_par,
-            'c_run': c_run,
-            'has_arcs': self.has_arcs,
-            'extra_css': self.extra_css,
-            'fr': fr,
-            'nums': nums,
-            'lines': lines,
-            'time_stamp': self.time_stamp,
-        })
+            if ldata.long_annotations:
+                longs = ldata.long_annotations
+                if len(longs) == 1:
+                    ldata.annotate_long = longs[0]
+                else:
+                    ldata.annotate_long = u"{:d} missed branches: {}".format(
+                        len(longs),
+                        u", ".join(
+                            u"{:d}) {}".format(num, ann_long)
+                            for num, ann_long in enumerate(longs, start=1)
+                            ),
+                    )
+            else:
+                ldata.annotate_long = None
 
+            css_classes = []
+            if ldata.category:
+                css_classes.append(self.template_globals['category'][ldata.category])
+            ldata.css_class = ' '.join(css_classes) or "pln"
+
+        html = self.source_tmpl.render(file_data.__dict__)
         write_html(html_path, html)
 
         # Save this file's information for the index file.
@@ -291,77 +344,73 @@
             'html_filename': html_filename,
             'relative_filename': fr.relative_filename(),
         }
-        self.files.append(index_info)
-        self.status.set_index_info(rootname, index_info)
+        self.file_summaries.append(index_info)
+        self.incr.set_index_info(rootname, index_info)
 
     def index_file(self):
         """Write the index.html file for this report."""
         index_tmpl = Templite(read_data("index.html"), self.template_globals)
 
-        self.totals = sum(self.all_files_nums)
-
         html = index_tmpl.render({
-            'has_arcs': self.has_arcs,
-            'extra_css': self.extra_css,
-            'files': self.files,
+            'files': self.file_summaries,
             'totals': self.totals,
-            'time_stamp': self.time_stamp,
         })
 
         write_html(os.path.join(self.directory, "index.html"), html)
 
         # Write the latest hashes for next time.
-        self.status.write(self.directory)
+        self.incr.write()
 
 
-class HtmlStatus(object):
-    """The status information we keep to support incremental reporting."""
+class IncrementalChecker(object):
+    """Logic and data to support incremental reporting."""
 
     STATUS_FILE = "status.json"
-    STATUS_FORMAT = 1
+    STATUS_FORMAT = 2
 
     #           pylint: disable=wrong-spelling-in-comment,useless-suppression
     #  The data looks like:
     #
     #  {
-    #      'format': 1,
-    #      'settings': '540ee119c15d52a68a53fe6f0897346d',
-    #      'version': '4.0a1',
-    #      'files': {
-    #          'cogapp___init__': {
-    #              'hash': 'e45581a5b48f879f301c0f30bf77a50c',
-    #              'index': {
-    #                  'html_filename': 'cogapp___init__.html',
-    #                  'name': 'cogapp/__init__',
-    #                  'nums': <coverage.results.Numbers object at 0x10ab7ed0>,
+    #      "format": 2,
+    #      "globals": "540ee119c15d52a68a53fe6f0897346d",
+    #      "version": "4.0a1",
+    #      "files": {
+    #          "cogapp___init__": {
+    #              "hash": "e45581a5b48f879f301c0f30bf77a50c",
+    #              "index": {
+    #                  "html_filename": "cogapp___init__.html",
+    #                  "relative_filename": "cogapp/__init__",
+    #                  "nums": [ 1, 14, 0, 0, 0, 0, 0 ]
     #              }
     #          },
     #          ...
-    #          'cogapp_whiteutils': {
-    #              'hash': '8504bb427fc488c4176809ded0277d51',
-    #              'index': {
-    #                  'html_filename': 'cogapp_whiteutils.html',
-    #                  'name': 'cogapp/whiteutils',
-    #                  'nums': <coverage.results.Numbers object at 0x10ab7d90>,
+    #          "cogapp_whiteutils": {
+    #              "hash": "8504bb427fc488c4176809ded0277d51",
+    #              "index": {
+    #                  "html_filename": "cogapp_whiteutils.html",
+    #                  "relative_filename": "cogapp/whiteutils",
+    #                  "nums": [ 1, 59, 0, 1, 28, 2, 2 ]
     #              }
-    #          },
-    #      },
+    #          }
+    #      }
     #  }
 
-    def __init__(self):
+    def __init__(self, directory):
+        self.directory = directory
         self.reset()
 
     def reset(self):
-        """Initialize to empty."""
-        self.settings = ''
+        """Initialize to empty. Causes all files to be reported."""
+        self.globals = ''
         self.files = {}
 
-    def read(self, directory):
-        """Read the last status in `directory`."""
+    def read(self):
+        """Read the information we stored last time."""
         usable = False
         try:
-            status_file = os.path.join(directory, self.STATUS_FILE)
-            with open(status_file, "r") as fstatus:
+            status_file = os.path.join(self.directory, self.STATUS_FILE)
+            with open(status_file) as fstatus:
                 status = json.load(fstatus)
         except (IOError, ValueError):
             usable = False
@@ -377,13 +426,13 @@
             for filename, fileinfo in iitems(status['files']):
                 fileinfo['index']['nums'] = Numbers(*fileinfo['index']['nums'])
                 self.files[filename] = fileinfo
-            self.settings = status['settings']
+            self.globals = status['globals']
         else:
             self.reset()
 
-    def write(self, directory):
-        """Write the current status to `directory`."""
-        status_file = os.path.join(directory, self.STATUS_FILE)
+    def write(self):
+        """Write the current status."""
+        status_file = os.path.join(self.directory, self.STATUS_FILE)
         files = {}
         for filename, fileinfo in iitems(self.files):
             fileinfo['index']['nums'] = fileinfo['index']['nums'].init_args()
@@ -392,26 +441,41 @@
         status = {
             'format': self.STATUS_FORMAT,
             'version': coverage.__version__,
-            'settings': self.settings,
+            'globals': self.globals,
             'files': files,
         }
         with open(status_file, "w") as fout:
             json.dump(status, fout, separators=(',', ':'))
 
-        # Older versions of ShiningPanda look for the old name, status.dat.
-        # Accommodate them if we are running under Jenkins.
-        # https://issues.jenkins-ci.org/browse/JENKINS-28428
-        if "JENKINS_URL" in os.environ:
-            with open(os.path.join(directory, "status.dat"), "w") as dat:
-                dat.write("https://issues.jenkins-ci.org/browse/JENKINS-28428\n")
+    def check_global_data(self, *data):
+        """Check the global data that can affect incremental reporting."""
+        m = Hasher()
+        for d in data:
+            m.update(d)
+        these_globals = m.hexdigest()
+        if self.globals != these_globals:
+            self.reset()
+            self.globals = these_globals
+
+    def can_skip_file(self, data, fr, rootname):
+        """Can we skip reporting this file?
 
-    def settings_hash(self):
-        """Get the hash of the coverage.py settings."""
-        return self.settings
+        `data` is a CoverageData object, `fr` is a `FileReporter`, and
+        `rootname` is the name being used for the file.
+        """
+        m = Hasher()
+        m.update(fr.source().encode('utf-8'))
+        add_data_to_hash(data, fr.filename, m)
+        this_hash = m.hexdigest()
 
-    def set_settings_hash(self, settings):
-        """Set the hash of the coverage.py settings."""
-        self.settings = settings
+        that_hash = self.file_hash(rootname)
+
+        if this_hash == that_hash:
+            # Nothing has changed to require the file to be reported again.
+            return True
+        else:
+            self.set_file_hash(rootname, this_hash)
+            return False
 
     def file_hash(self, fname):
         """Get the hash of `fname`'s contents."""
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/eric6/DebugClients/Python/coverage/inorout.py	Sat Feb 22 14:27:42 2020 +0100
@@ -0,0 +1,469 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
+
+"""Determining whether files are being measured/reported or not."""
+
+# For finding the stdlib
+import atexit
+import inspect
+import itertools
+import os
+import platform
+import re
+import sys
+import traceback
+
+from coverage import env
+from coverage.backward import code_object
+from coverage.disposition import FileDisposition, disposition_init
+from coverage.files import TreeMatcher, FnmatchMatcher, ModuleMatcher
+from coverage.files import prep_patterns, find_python_files, canonical_filename
+from coverage.misc import CoverageException
+from coverage.python import source_for_file, source_for_morf
+
+
+# Pypy has some unusual stuff in the "stdlib".  Consider those locations
+# when deciding where the stdlib is.  These modules are not used for anything,
+# they are modules importable from the pypy lib directories, so that we can
+# find those directories.
+_structseq = _pypy_irc_topic = None
+if env.PYPY:
+    try:
+        import _structseq
+    except ImportError:
+        pass
+
+    try:
+        import _pypy_irc_topic
+    except ImportError:
+        pass
+
+
+def canonical_path(morf, directory=False):
+    """Return the canonical path of the module or file `morf`.
+
+    If the module is a package, then return its directory. If it is a
+    module, then return its file, unless `directory` is True, in which
+    case return its enclosing directory.
+
+    """
+    morf_path = canonical_filename(source_for_morf(morf))
+    # A package's file is its __init__.py, so report the package directory.
+    if morf_path.endswith("__init__.py") or directory:
+        morf_path = os.path.split(morf_path)[0]
+    return morf_path
+
+
+def name_for_module(filename, frame):
+    """Get the name of the module for a filename and frame.
+
+    For configurability's sake, we allow __main__ modules to be matched by
+    their importable name.
+
+    If loaded via runpy (aka -m), we can usually recover the "original"
+    full dotted module name, otherwise, we resort to interpreting the
+    file name to get the module's name.  In the case that the module name
+    can't be determined, None is returned.
+
+    """
+    module_globals = frame.f_globals if frame is not None else {}
+    if module_globals is None:          # pragma: only ironpython
+        # IronPython doesn't provide globals: https://github.com/IronLanguages/main/issues/1296
+        module_globals = {}
+
+    dunder_name = module_globals.get('__name__', None)
+
+    if isinstance(dunder_name, str) and dunder_name != '__main__':
+        # This is the usual case: an imported module.
+        return dunder_name
+
+    # __main__ (or unnamed) module: try the loader's record of the name.
+    loader = module_globals.get('__loader__', None)
+    for attrname in ('fullname', 'name'):   # attribute renamed in py3.2
+        if hasattr(loader, attrname):
+            fullname = getattr(loader, attrname)
+        else:
+            continue
+
+        if isinstance(fullname, str) and fullname != '__main__':
+            # Module loaded via: runpy -m
+            return fullname
+
+    # Script as first argument to Python command line.
+    inspectedname = inspect.getmodulename(filename)
+    if inspectedname is not None:
+        return inspectedname
+    else:
+        # Fall back to whatever __name__ was (possibly '__main__' or None).
+        return dunder_name
+
+
+def module_is_namespace(mod):
+    """Is the module object `mod` a PEP420 namespace module?"""
+    # Namespace packages have a __path__ but no __file__.
+    return hasattr(mod, '__path__') and getattr(mod, '__file__', None) is None
+
+
+def module_has_file(mod):
+    """Does the module object `mod` have an existing __file__ ?"""
+    mod__file__ = getattr(mod, '__file__', None)
+    if mod__file__ is None:
+        return False
+    # The attribute may exist but point at a file that is gone.
+    return os.path.exists(mod__file__)
+
+
+class InOrOut(object):
+    """Machinery for determining what files to measure."""
+
+    def __init__(self, warn):
+        # `warn` is a callable(msg, slug=...) used to report problems.
+        self.warn = warn
+
+        # The matchers for should_trace.
+        self.source_match = None
+        self.source_pkgs_match = None
+        self.pylib_paths = self.cover_paths = None
+        self.pylib_match = self.cover_match = None
+        self.include_match = self.omit_match = None
+        # NOTE(review): initialized as a list here, but read as
+        # `self.plugins.file_tracers` in should_trace -- presumably the
+        # owning object replaces this with a Plugins instance; confirm.
+        self.plugins = []
+        self.disp_class = FileDisposition
+
+        # The source argument can be directories or package names.
+        self.source = []
+        self.source_pkgs = []
+        self.source_pkgs_unmatched = []
+        self.omit = self.include = None
+
+    def configure(self, config):
+        """Apply the configuration to get ready for decision-time."""
+        # Directories go in self.source; importable names in self.source_pkgs.
+        for src in config.source or []:
+            if os.path.isdir(src):
+                self.source.append(canonical_filename(src))
+            else:
+                self.source_pkgs.append(src)
+        self.source_pkgs_unmatched = self.source_pkgs[:]
+
+        self.omit = prep_patterns(config.run_omit)
+        self.include = prep_patterns(config.run_include)
+
+        # The directories for files considered "installed with the interpreter".
+        self.pylib_paths = set()
+        if not config.cover_pylib:
+            # Look at where some standard modules are located. That's the
+            # indication for "installed with the interpreter". In some
+            # environments (virtualenv, for example), these modules may be
+            # spread across a few locations. Look at all the candidate modules
+            # we've imported, and take all the different ones.
+            for m in (atexit, inspect, os, platform, _pypy_irc_topic, re, _structseq, traceback):
+                if m is not None and hasattr(m, "__file__"):
+                    self.pylib_paths.add(canonical_path(m, directory=True))
+
+            if _structseq and not hasattr(_structseq, '__file__'):
+                # PyPy 2.4 has no __file__ in the builtin modules, but the code
+                # objects still have the file names.  So dig into one to find
+                # the path to exclude.  The "filename" might be synthetic,
+                # don't be fooled by those.
+                structseq_file = code_object(_structseq.structseq_new).co_filename
+                if not structseq_file.startswith("<"):
+                    self.pylib_paths.add(canonical_path(structseq_file))
+
+        # To avoid tracing the coverage.py code itself, we skip anything
+        # located where we are.
+        self.cover_paths = [canonical_path(__file__, directory=True)]
+        if env.TESTING:
+            # Don't include our own test code.
+            self.cover_paths.append(os.path.join(self.cover_paths[0], "tests"))
+
+            # When testing, we use PyContracts, which should be considered
+            # part of coverage.py, and it uses six. Exclude those directories
+            # just as we exclude ourselves.
+            import contracts
+            import six
+            for mod in [contracts, six]:
+                self.cover_paths.append(canonical_path(mod))
+
+        # Create the matchers we need for should_trace
+        if self.source or self.source_pkgs:
+            self.source_match = TreeMatcher(self.source)
+            self.source_pkgs_match = ModuleMatcher(self.source_pkgs)
+        else:
+            # Without explicit source, exclude the stdlib and coverage itself.
+            if self.cover_paths:
+                self.cover_match = TreeMatcher(self.cover_paths)
+            if self.pylib_paths:
+                self.pylib_match = TreeMatcher(self.pylib_paths)
+        if self.include:
+            self.include_match = FnmatchMatcher(self.include)
+        if self.omit:
+            self.omit_match = FnmatchMatcher(self.omit)
+
+    def should_trace(self, filename, frame=None):
+        """Decide whether to trace execution in `filename`, with a reason.
+
+        This function is called from the trace function.  As each new file name
+        is encountered, this function determines whether it is traced or not.
+
+        Returns a FileDisposition object.
+
+        """
+        original_filename = filename
+        disp = disposition_init(self.disp_class, filename)
+
+        def nope(disp, reason):
+            """Simple helper to make it easy to return NO."""
+            disp.trace = False
+            disp.reason = reason
+            return disp
+
+        if frame is not None:
+            # Compiled Python files have two file names: frame.f_code.co_filename is
+            # the file name at the time the .pyc was compiled.  The second name is
+            # __file__, which is where the .pyc was actually loaded from.  Since
+            # .pyc files can be moved after compilation (for example, by being
+            # installed), we look for __file__ in the frame and prefer it to the
+            # co_filename value.
+            dunder_file = frame.f_globals and frame.f_globals.get('__file__')
+            if dunder_file:
+                filename = source_for_file(dunder_file)
+                if original_filename and not original_filename.startswith('<'):
+                    orig = os.path.basename(original_filename)
+                    if orig != os.path.basename(filename):
+                        # Files shouldn't be renamed when moved. This happens when
+                        # exec'ing code.  If it seems like something is wrong with
+                        # the frame's file name, then just use the original.
+                        filename = original_filename
+
+        if not filename:
+            # Empty string is pretty useless.
+            return nope(disp, "empty string isn't a file name")
+
+        if filename.startswith('memory:'):
+            return nope(disp, "memory isn't traceable")
+
+        if filename.startswith('<'):
+            # Lots of non-file execution is represented with artificial
+            # file names like "<string>", "<doctest readme.txt[0]>", or
+            # "<exec_function>".  Don't ever trace these executions, since we
+            # can't do anything with the data later anyway.
+            return nope(disp, "not a real file name")
+
+        # pyexpat does a dumb thing, calling the trace function explicitly from
+        # C code with a C file name.
+        if re.search(r"[/\\]Modules[/\\]pyexpat.c", filename):
+            return nope(disp, "pyexpat lies about itself")
+
+        # Jython reports the .class file to the tracer, use the source file.
+        if filename.endswith("$py.class"):
+            filename = filename[:-9] + ".py"
+
+        canonical = canonical_filename(filename)
+        disp.canonical_filename = canonical
+
+        # Try the plugins, see if they have an opinion about the file.
+        plugin = None
+        for plugin in self.plugins.file_tracers:
+            if not plugin._coverage_enabled:
+                continue
+
+            try:
+                file_tracer = plugin.file_tracer(canonical)
+                if file_tracer is not None:
+                    file_tracer._coverage_plugin = plugin
+                    disp.trace = True
+                    disp.file_tracer = file_tracer
+                    if file_tracer.has_dynamic_source_filename():
+                        disp.has_dynamic_filename = True
+                    else:
+                        disp.source_filename = canonical_filename(
+                            file_tracer.source_filename()
+                        )
+                    break
+            except Exception:
+                # A broken plugin shouldn't kill tracing: warn and disable it.
+                self.warn(
+                    "Disabling plug-in %r due to an exception:" % (plugin._coverage_plugin_name)
+                )
+                traceback.print_exc()
+                plugin._coverage_enabled = False
+                continue
+        else:
+            # This `else` belongs to the `for`: reached only when no plugin
+            # claimed the file via `break`.
+            # No plugin wanted it: it's Python.
+            disp.trace = True
+            disp.source_filename = canonical
+
+        if not disp.has_dynamic_filename:
+            if not disp.source_filename:
+                raise CoverageException(
+                    "Plugin %r didn't set source_filename for %r" %
+                    (plugin, disp.original_filename)
+                )
+            reason = self.check_include_omit_etc(disp.source_filename, frame)
+            if reason:
+                nope(disp, reason)
+
+        return disp
+
+    def check_include_omit_etc(self, filename, frame):
+        """Check a file name against the include, omit, etc, rules.
+
+        Returns a string or None.  String means, don't trace, and is the reason
+        why.  None means no reason found to not trace.
+
+        """
+        modulename = name_for_module(filename, frame)
+
+        # If the user specified source or include, then that's authoritative
+        # about the outer bound of what to measure and we don't have to apply
+        # any canned exclusions. If they didn't, then we have to exclude the
+        # stdlib and coverage.py directories.
+        if self.source_match:
+            if self.source_pkgs_match.match(modulename):
+                if modulename in self.source_pkgs_unmatched:
+                    self.source_pkgs_unmatched.remove(modulename)
+            elif not self.source_match.match(filename):
+                return "falls outside the --source trees"
+        elif self.include_match:
+            if not self.include_match.match(filename):
+                return "falls outside the --include trees"
+        else:
+            # If we aren't supposed to trace installed code, then check if this
+            # is near the Python standard library and skip it if so.
+            if self.pylib_match and self.pylib_match.match(filename):
+                return "is in the stdlib"
+
+            # We exclude the coverage.py code itself, since a little of it
+            # will be measured otherwise.
+            if self.cover_match and self.cover_match.match(filename):
+                return "is part of coverage.py"
+
+        # Check the file against the omit pattern.
+        if self.omit_match and self.omit_match.match(filename):
+            return "is inside an --omit pattern"
+
+        # No point tracing a file we can't later write to SQLite.
+        try:
+            filename.encode("utf8")
+        except UnicodeEncodeError:
+            return "non-encodable filename"
+
+        # No reason found to skip this file.
+        return None
+
+    def warn_conflicting_settings(self):
+        """Warn if there are settings that conflict."""
+        if self.include:
+            if self.source or self.source_pkgs:
+                self.warn("--include is ignored because --source is set", slug="include-ignored")
+
+    def warn_already_imported_files(self):
+        """Warn if files have already been imported that we will be measuring."""
+        if self.include or self.source or self.source_pkgs:
+            warned = set()
+            # Copy sys.modules values: it can change during iteration.
+            for mod in list(sys.modules.values()):
+                filename = getattr(mod, "__file__", None)
+                if filename is None:
+                    continue
+                if filename in warned:
+                    continue
+
+                disp = self.should_trace(filename)
+                if disp.trace:
+                    msg = "Already imported a file that will be measured: {}".format(filename)
+                    self.warn(msg, slug="already-imported")
+                    warned.add(filename)
+
+    def warn_unimported_source(self):
+        """Warn about source packages that were of interest, but never traced."""
+        for pkg in self.source_pkgs_unmatched:
+            self._warn_about_unmeasured_code(pkg)
+
+    def _warn_about_unmeasured_code(self, pkg):
+        """Warn about a package or module that we never traced.
+
+        `pkg` is a string, the name of the package or module.
+
+        """
+        mod = sys.modules.get(pkg)
+        if mod is None:
+            self.warn("Module %s was never imported." % pkg, slug="module-not-imported")
+            return
+
+        if module_is_namespace(mod):
+            # A namespace package. It's OK for this not to have been traced,
+            # since there is no code directly in it.
+            return
+
+        if not module_has_file(mod):
+            self.warn("Module %s has no Python source." % pkg, slug="module-not-python")
+            return
+
+        # The module was in sys.modules, and seems like a module with code, but
+        # we never measured it. I guess that means it was imported before
+        # coverage even started.
+        self.warn(
+            "Module %s was previously imported, but not measured" % pkg,
+            slug="module-not-measured",
+        )
+
+    def find_possibly_unexecuted_files(self):
+        """Find files in the areas of interest that might be untraced.
+
+        Yields pairs: file path, and responsible plug-in name.
+        """
+        for pkg in self.source_pkgs:
+            # Skip packages that were never imported or have no real file.
+            if (not pkg in sys.modules or
+                not module_has_file(sys.modules[pkg])):
+                continue
+            pkg_file = source_for_file(sys.modules[pkg].__file__)
+            for ret in self._find_executable_files(canonical_path(pkg_file)):
+                yield ret
+
+        for src in self.source:
+            for ret in self._find_executable_files(src):
+                yield ret
+
+    def _find_plugin_files(self, src_dir):
+        """Get executable files from the plugins."""
+        for plugin in self.plugins.file_tracers:
+            for x_file in plugin.find_executable_files(src_dir):
+                yield x_file, plugin._coverage_plugin_name
+
+    def _find_executable_files(self, src_dir):
+        """Find executable files in `src_dir`.
+
+        Search for files in `src_dir` that can be executed because they
+        are probably importable. Don't include ones that have been omitted
+        by the configuration.
+
+        Yield the file path, and the plugin name that handles the file.
+
+        """
+        # Plain Python files have no responsible plugin (None).
+        py_files = ((py_file, None) for py_file in find_python_files(src_dir))
+        plugin_files = self._find_plugin_files(src_dir)
+
+        for file_path, plugin_name in itertools.chain(py_files, plugin_files):
+            file_path = canonical_filename(file_path)
+            if self.omit_match and self.omit_match.match(file_path):
+                # Turns out this file was omitted, so don't pull it back
+                # in as unexecuted.
+                continue
+            yield file_path, plugin_name
+
+    def sys_info(self):
+        """Our information for Coverage.sys_info.
+
+        Returns a list of (key, value) pairs.
+        """
+        info = [
+            ('cover_paths', self.cover_paths),
+            ('pylib_paths', self.pylib_paths),
+        ]
+
+        matcher_names = [
+            'source_match', 'source_pkgs_match',
+            'include_match', 'omit_match',
+            'cover_match', 'pylib_match',
+            ]
+
+        for matcher_name in matcher_names:
+            matcher = getattr(self, matcher_name)
+            if matcher:
+                matcher_info = matcher.info()
+            else:
+                matcher_info = '-none-'
+            info.append((matcher_name, matcher_info))
+
+        return info
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/eric6/DebugClients/Python/coverage/jsonreport.py	Sat Feb 22 14:27:42 2020 +0100
@@ -0,0 +1,99 @@
+# coding: utf-8
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
+
+"""Json reporting for coverage.py"""
+import datetime
+import json
+import sys
+
+from coverage import __version__
+from coverage.report import get_analysis_to_report
+from coverage.results import Numbers
+
+
+class JsonReporter(object):
+    """A reporter for writing JSON coverage results."""
+
+    def __init__(self, coverage):
+        # `coverage` is the Coverage instance to report on.
+        self.coverage = coverage
+        self.config = self.coverage.config
+        # Running totals accumulated across all reported files.
+        self.total = Numbers()
+        self.report_data = {}
+
+    def report(self, morfs, outfile=None):
+        """Generate a json report for `morfs`.
+
+        `morfs` is a list of modules or file names.
+
+        `outfile` is a file object to write the json to
+
+        """
+        outfile = outfile or sys.stdout
+        coverage_data = self.coverage.get_data()
+        coverage_data.set_query_contexts(self.config.report_contexts)
+        self.report_data["meta"] = {
+            "version": __version__,
+            "timestamp": datetime.datetime.now().isoformat(),
+            "branch_coverage": coverage_data.has_arcs(),
+            "show_contexts": self.config.json_show_contexts,
+        }
+
+        measured_files = {}
+        for file_reporter, analysis in get_analysis_to_report(self.coverage, morfs):
+            measured_files[file_reporter.relative_filename()] = self.report_one_file(
+                coverage_data,
+                analysis
+            )
+
+        self.report_data["files"] = measured_files
+
+        self.report_data["totals"] = {
+            'covered_lines': self.total.n_executed,
+            'num_statements': self.total.n_statements,
+            'percent_covered': self.total.pc_covered,
+            'missing_lines': self.total.n_missing,
+            'excluded_lines': self.total.n_excluded,
+        }
+
+        # Branch totals are only meaningful when arcs were recorded.
+        if coverage_data.has_arcs():
+            self.report_data["totals"].update({
+                'num_branches': self.total.n_branches,
+                'num_partial_branches': self.total.n_partial_branches,
+            })
+
+        json.dump(
+            self.report_data,
+            outfile,
+            indent=4 if self.config.json_pretty_print else None
+        )
+
+        # Overall percent covered, or 0 if nothing was measured.
+        return self.total.n_statements and self.total.pc_covered
+
+    def report_one_file(self, coverage_data, analysis):
+        """Extract the relevant report data for a single file"""
+        nums = analysis.numbers
+        # Fold this file's numbers into the running totals.
+        self.total += nums
+        summary = {
+            'covered_lines': nums.n_executed,
+            'num_statements': nums.n_statements,
+            'percent_covered': nums.pc_covered,
+            'missing_lines': nums.n_missing,
+            'excluded_lines': nums.n_excluded,
+        }
+        reported_file = {
+            'executed_lines': sorted(analysis.executed),
+            'summary': summary,
+            'missing_lines': sorted(analysis.missing),
+            'excluded_lines': sorted(analysis.excluded)
+        }
+        if self.config.json_show_contexts:
+            reported_file['contexts'] = analysis.data.contexts_by_lineno(
+                analysis.filename,
+            )
+        if coverage_data.has_arcs():
+            reported_file['summary'].update({
+                'num_branches': nums.n_branches,
+                'num_partial_branches': nums.n_partial_branches,
+            })
+        return reported_file
--- a/eric6/DebugClients/Python/coverage/misc.py	Wed Feb 19 19:38:36 2020 +0100
+++ b/eric6/DebugClients/Python/coverage/misc.py	Sat Feb 22 14:27:42 2020 +0100
@@ -1,5 +1,5 @@
 # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
 
 """Miscellaneous stuff for coverage.py."""
 
@@ -8,6 +8,10 @@
 import inspect
 import locale
 import os
+import os.path
+import random
+import re
+import socket
 import sys
 import types
 
@@ -45,9 +49,14 @@
     return _decorator
 
 
+# Environment COVERAGE_NO_CONTRACTS=1 can turn off contracts while debugging
+# tests to remove noise from stack traces.
+# $set_env.py: COVERAGE_NO_CONTRACTS - Disable PyContracts to simplify stack traces.
+USE_CONTRACTS = env.TESTING and not bool(int(os.environ.get("COVERAGE_NO_CONTRACTS", 0)))
+
 # Use PyContracts for assertion testing on parameters and returns, but only if
 # we are running our own test suite.
-if env.TESTING:
+if USE_CONTRACTS:
     from contracts import contract              # pylint: disable=unused-import
     from contracts import new_contract as raw_new_contract
 
@@ -69,11 +78,11 @@
         """Ensure that only one of the argnames is non-None."""
         def _decorator(func):
             argnameset = set(name.strip() for name in argnames.split(","))
-            def _wrapped(*args, **kwargs):
+            def _wrapper(*args, **kwargs):
                 vals = [kwargs.get(name) for name in argnameset]
                 assert sum(val is not None for val in vals) == 1
                 return func(*args, **kwargs)
-            return _wrapped
+            return _wrapper
         return _decorator
 else:                                           # pragma: not testing
     # We aren't using real PyContracts, so just define our decorators as
@@ -100,44 +109,6 @@
         return "%d-%d" % (start, end)
 
 
-def format_lines(statements, lines):
-    """Nicely format a list of line numbers.
-
-    Format a list of line numbers for printing by coalescing groups of lines as
-    long as the lines represent consecutive statements.  This will coalesce
-    even if there are gaps between statements.
-
-    For example, if `statements` is [1,2,3,4,5,10,11,12,13,14] and
-    `lines` is [1,2,5,10,11,13,14] then the result will be "1-2, 5-11, 13-14".
-
-    Both `lines` and `statements` can be any iterable. All of the elements of
-    `lines` must be in `statements`, and all of the values must be positive
-    integers.
-
-    """
-    statements = sorted(statements)
-    lines = sorted(lines)
-
-    pairs = []
-    start = None
-    lidx = 0
-    for stmt in statements:
-        if lidx >= len(lines):
-            break
-        if stmt == lines[lidx]:
-            lidx += 1
-            if not start:
-                start = stmt
-            end = stmt
-        elif start:
-            pairs.append((start, end))
-            start = None
-    if start:
-        pairs.append((start, end))
-    ret = ', '.join(map(nice_pair, pairs))
-    return ret
-
-
 def expensive(fn):
     """A decorator to indicate that a method shouldn't be called more than once.
 
@@ -148,13 +119,12 @@
     if env.TESTING:
         attr = "_once_" + fn.__name__
 
-        def _wrapped(self):
-            """Inner function that checks the cache."""
+        def _wrapper(self):
             if hasattr(self, attr):
                 raise AssertionError("Shouldn't have called %s more than once" % fn.__name__)
             setattr(self, attr, True)
             return fn(self)
-        return _wrapped
+        return _wrapper
     else:
         return fn                   # pragma: not testing
 
@@ -181,6 +151,20 @@
             raise
 
 
+def ensure_dir(directory):
+    """Make sure the directory exists.
+
+    If `directory` is None or empty, do nothing.
+    """
+    # NOTE(review): isdir-then-makedirs can race with concurrent creation;
+    # exist_ok=True would avoid that but is py3-only -- confirm py2 support.
+    if directory and not os.path.isdir(directory):
+        os.makedirs(directory)
+
+
+def ensure_dir_for_file(path):
+    """Make sure the directory for the path exists."""
+    ensure_dir(os.path.dirname(path))
+
+
 def output_encoding(outfile=None):
     """Determine the encoding to use for output written to `outfile` or stdout."""
     if outfile is None:
@@ -193,6 +177,26 @@
     return encoding
 
 
+def filename_suffix(suffix):
+    """Compute a filename suffix for a data file.
+
+    If `suffix` is a string or None, simply return it. If `suffix` is True,
+    then build a suffix incorporating the hostname, process id, and a random
+    number.
+
+    Returns a string or None.
+
+    """
+    if suffix is True:
+        # If data_suffix was a simple true value, then make a suffix with
+        # plenty of distinguishing information.  We do this here in
+        # `save()` at the last minute so that the pid will be correct even
+        # if the process forks.
+        # os.urandom seeds the RNG so forked children don't collide.
+        dice = random.Random(os.urandom(8)).randint(0, 999999)
+        suffix = "%s.%s.%06d" % (socket.gethostname(), os.getpid(), dice)
+    return suffix
+
+
 class Hasher(object):
     """Hashes Python data into md5."""
     def __init__(self):
@@ -226,6 +230,7 @@
                     continue
                 self.update(k)
                 self.update(a)
+        self.md5.update(b'.')
 
     def hexdigest(self):
         """Retrieve the hex digest of the hash."""
@@ -249,14 +254,67 @@
         )
 
 
-class SimpleRepr(object):
-    """A mixin implementing a simple __repr__."""
+class DefaultValue(object):
+    """A sentinel object to use for unusual default-value needs.
+
+    Construct with a string that will be used as the repr, for display in help
+    and Sphinx output.
+
+    """
+    def __init__(self, display_as):
+        # The text shown wherever this sentinel's repr is rendered.
+        self.display_as = display_as
+
     def __repr__(self):
-        return "<{klass} @{id:x} {attrs}>".format(
-            klass=self.__class__.__name__,
-            id=id(self) & 0xFFFFFF,
-            attrs=" ".join("{}={!r}".format(k, v) for k, v in self.__dict__.items()),
-            )
+        return self.display_as
+
+
+def substitute_variables(text, variables):
+    """Substitute ``${VAR}`` variables in `text` with their values.
+
+    Variables in the text can take a number of shell-inspired forms::
+
+        $VAR
+        ${VAR}
+        ${VAR?}             strict: an error if VAR isn't defined.
+        ${VAR-missing}      defaulted: "missing" if VAR isn't defined.
+        $$                  just a dollar sign.
+
+    `variables` is a dictionary of variable values.
+
+    Returns the resulting text with values substituted.
+
+    Raises CoverageException if a strict (``${VAR?}``) variable is undefined.
+    """
+    dollar_pattern = r"""(?x)   # Use extended regex syntax
+        \$                      # A dollar sign,
+        (?:                     # then
+            (?P<dollar>\$) |        # a dollar sign, or
+            (?P<word1>\w+) |        # a plain word, or
+            {                       # a {-wrapped
+                (?P<word2>\w+)          # word,
+                (?:
+                    (?P<strict>\?) |        # with a strict marker
+                    -(?P<defval>[^}]*)      # or a default value
+                )?                      # maybe.
+            }
+        )
+        """
+
+    def dollar_replace(match):
+        """Called for each $replacement."""
+        # Only one of the groups will have matched, just get its text.
+        word = next(g for g in match.group('dollar', 'word1', 'word2') if g)
+        if word == "$":
+            # "$$" collapses to a literal dollar sign.
+            return "$"
+        elif word in variables:
+            return variables[word]
+        elif match.group('strict'):
+            msg = "Variable {} is undefined: {!r}".format(word, text)
+            raise CoverageException(msg)
+        else:
+            # Undefined, non-strict: use the default (may be None -> '').
+            return match.group('defval')
+
+    text = re.sub(dollar_pattern, dollar_replace, text)
+    return text
 
 
 class BaseCoverageException(Exception):
@@ -265,7 +323,7 @@
 
 
 class CoverageException(BaseCoverageException):
-    """A run-of-the-mill exception specific to coverage.py."""
+    """An exception raised by a coverage.py function."""
     pass
 
 
--- a/eric6/DebugClients/Python/coverage/multiproc.py	Wed Feb 19 19:38:36 2020 +0100
+++ b/eric6/DebugClients/Python/coverage/multiproc.py	Sat Feb 22 14:27:42 2020 +0100
@@ -1,43 +1,49 @@
 # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
 
 """Monkey-patching to add multiprocessing support for coverage.py"""
 
 import multiprocessing
 import multiprocessing.process
 import os
+import os.path
 import sys
+import traceback
 
+from coverage import env
 from coverage.misc import contract
 
 # An attribute that will be set on the module to indicate that it has been
 # monkey-patched.
 PATCHED_MARKER = "_coverage$patched"
 
-# The environment variable that specifies the rcfile for subprocesses.
-COVERAGE_RCFILE_ENV = "_COVERAGE_RCFILE"
 
-
-if sys.version_info >= (3, 4):
+if env.PYVERSION >= (3, 4):
     OriginalProcess = multiprocessing.process.BaseProcess
 else:
     OriginalProcess = multiprocessing.Process
 
 original_bootstrap = OriginalProcess._bootstrap
 
-class ProcessWithCoverage(OriginalProcess):
+class ProcessWithCoverage(OriginalProcess):         # pylint: disable=abstract-method
     """A replacement for multiprocess.Process that starts coverage."""
 
     def _bootstrap(self, *args, **kwargs):          # pylint: disable=arguments-differ
         """Wrapper around _bootstrap to start coverage."""
-        from coverage import Coverage       # avoid circular import
-        rcfile = os.environ[COVERAGE_RCFILE_ENV]
-        cov = Coverage(data_suffix=True, config_file=rcfile)
-        cov.start()
-        debug = cov.debug
         try:
+            from coverage import Coverage       # avoid circular import
+            cov = Coverage(data_suffix=True)
+            cov._warn_preimported_source = False
+            cov.start()
+            debug = cov._debug
             if debug.should("multiproc"):
                 debug.write("Calling multiprocessing bootstrap")
+        except Exception:
+            print("Exception during multiprocessing bootstrap init:")
+            traceback.print_exc(file=sys.stdout)
+            sys.stdout.flush()
+            raise
+        try:
             return original_bootstrap(self, *args, **kwargs)
         finally:
             if debug.should("multiproc"):
@@ -73,14 +79,14 @@
     if hasattr(multiprocessing, PATCHED_MARKER):
         return
 
-    if sys.version_info >= (3, 4):
+    if env.PYVERSION >= (3, 4):
         OriginalProcess._bootstrap = ProcessWithCoverage._bootstrap
     else:
         multiprocessing.Process = ProcessWithCoverage
 
     # Set the value in ProcessWithCoverage that will be pickled into the child
     # process.
-    os.environ[COVERAGE_RCFILE_ENV] = rcfile
+    os.environ["COVERAGE_RCFILE"] = os.path.abspath(rcfile)
 
     # When spawning processes rather than forking them, we have no state in the
     # new process.  We sneak in there with a Stowaway: we stuff one of our own
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/eric6/DebugClients/Python/coverage/numbits.py	Sat Feb 22 14:27:42 2020 +0100
@@ -0,0 +1,163 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
+
+"""
+Functions to manipulate packed binary representations of number sets.
+
+To save space, coverage stores sets of line numbers in SQLite using a packed
+binary representation called a numbits.  A numbits is a set of positive
+integers.
+
+A numbits is stored as a blob in the database.  The exact meaning of the bytes
+in the blobs should be considered an implementation detail that might change in
+the future.  Use these functions to work with those binary blobs of data.
+
+"""
+import json
+
+from coverage import env
+from coverage.backward import byte_to_int, bytes_to_ints, binary_bytes, zip_longest
+from coverage.misc import contract, new_contract
+
+if env.PY3:
+    def _to_blob(b):
+        """Convert a bytestring into a type SQLite will accept for a blob."""
+        return b
+
+    new_contract('blob', lambda v: isinstance(v, bytes))
+else:
+    def _to_blob(b):
+        """Convert a bytestring into a type SQLite will accept for a blob."""
+        return buffer(b)                                    # pylint: disable=undefined-variable
+
+    new_contract('blob', lambda v: isinstance(v, buffer))   # pylint: disable=undefined-variable
+
+
+@contract(nums='Iterable', returns='blob')
+def nums_to_numbits(nums):
+    """Convert `nums` into a numbits.
+
+    Arguments:
+        nums: a reusable iterable of integers, the line numbers to store.
+
+    Returns:
+        A binary blob.
+    """
+    try:
+        nbytes = max(nums) // 8 + 1
+    except ValueError:
+        # nums was empty.
+        return _to_blob(b'')
+    b = bytearray(nbytes)
+    for num in nums:
+        b[num//8] |= 1 << num % 8
+    return _to_blob(bytes(b))
+
+
+@contract(numbits='blob', returns='list[int]')
+def numbits_to_nums(numbits):
+    """Convert a numbits into a list of numbers.
+
+    Arguments:
+        numbits: a binary blob, the packed number set.
+
+    Returns:
+        A list of ints.
+
+    When registered as a SQLite function by :func:`register_sqlite_functions`,
+    this returns a string, a JSON-encoded list of ints.
+
+    """
+    nums = []
+    for byte_i, byte in enumerate(bytes_to_ints(numbits)):
+        for bit_i in range(8):
+            if (byte & (1 << bit_i)):
+                nums.append(byte_i * 8 + bit_i)
+    return nums
+
+
+@contract(numbits1='blob', numbits2='blob', returns='blob')
+def numbits_union(numbits1, numbits2):
+    """Compute the union of two numbits.
+
+    Returns:
+        A new numbits, the union of `numbits1` and `numbits2`.
+    """
+    byte_pairs = zip_longest(bytes_to_ints(numbits1), bytes_to_ints(numbits2), fillvalue=0)
+    return _to_blob(binary_bytes(b1 | b2 for b1, b2 in byte_pairs))
+
+
+@contract(numbits1='blob', numbits2='blob', returns='blob')
+def numbits_intersection(numbits1, numbits2):
+    """Compute the intersection of two numbits.
+
+    Returns:
+        A new numbits, the intersection `numbits1` and `numbits2`.
+    """
+    byte_pairs = zip_longest(bytes_to_ints(numbits1), bytes_to_ints(numbits2), fillvalue=0)
+    intersection_bytes = binary_bytes(b1 & b2 for b1, b2 in byte_pairs)
+    return _to_blob(intersection_bytes.rstrip(b'\0'))
+
+
+@contract(numbits1='blob', numbits2='blob', returns='bool')
+def numbits_any_intersection(numbits1, numbits2):
+    """Is there any number that appears in both numbits?
+
+    Determine whether two number sets have a non-empty intersection. This is
+    faster than computing the intersection.
+
+    Returns:
+        A bool, True if there is any number in both `numbits1` and `numbits2`.
+    """
+    byte_pairs = zip_longest(bytes_to_ints(numbits1), bytes_to_ints(numbits2), fillvalue=0)
+    return any(b1 & b2 for b1, b2 in byte_pairs)
+
+
+@contract(num='int', numbits='blob', returns='bool')
+def num_in_numbits(num, numbits):
+    """Does the integer `num` appear in `numbits`?
+
+    Returns:
+        A bool, True if `num` is a member of `numbits`.
+    """
+    nbyte, nbit = divmod(num, 8)
+    if nbyte >= len(numbits):
+        return False
+    return bool(byte_to_int(numbits[nbyte]) & (1 << nbit))
+
+
+def register_sqlite_functions(connection):
+    """
+    Define numbits functions in a SQLite connection.
+
+    This defines these functions for use in SQLite statements:
+
+    * :func:`numbits_union`
+    * :func:`numbits_intersection`
+    * :func:`numbits_any_intersection`
+    * :func:`num_in_numbits`
+    * :func:`numbits_to_nums`
+
+    `connection` is a :class:`sqlite3.Connection <python:sqlite3.Connection>`
+    object.  After creating the connection, pass it to this function to
+    register the numbits functions.  Then you can use numbits functions in your
+    queries::
+
+        import sqlite3
+        from coverage.numbits import register_sqlite_functions
+
+        conn = sqlite3.connect('example.db')
+        register_sqlite_functions(conn)
+        c = conn.cursor()
+        # Kind of a nonsense query: find all the files and contexts that
+        # executed line 47 in any file:
+        c.execute(
+            "select file_id, context_id from line_bits where num_in_numbits(?, numbits)",
+            (47,)
+        )
+    """
+    connection.create_function("numbits_union", 2, numbits_union)
+    connection.create_function("numbits_intersection", 2, numbits_intersection)
+    connection.create_function("numbits_any_intersection", 2, numbits_any_intersection)
+    connection.create_function("num_in_numbits", 2, num_in_numbits)
+    connection.create_function("numbits_to_nums", 1, lambda b: json.dumps(numbits_to_nums(b)))
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/eric6/DebugClients/Python/coverage/optional.py	Sat Feb 22 14:27:42 2020 +0100
@@ -0,0 +1,68 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
+
+"""
+Imports that we need at runtime, but might not be present.
+
+When importing one of these modules, always do it in the function where you
+need the module.  Some tests will need to remove the module.  If you import
+it at the top level of your module, then the test won't be able to simulate
+the module being unimportable.
+
+The import will always succeed, but the value will be None if the module is
+unavailable.
+
+Bad::
+
+    # MyModule.py
+    from coverage.optional import unsure
+
+    def use_unsure():
+        unsure.something()
+
+Good::
+
+    # MyModule.py
+
+    def use_unsure():
+        from coverage.optional import unsure
+        if unsure is None:
+            raise Exception("Module unsure isn't available!")
+
+        unsure.something()
+
+"""
+
+import contextlib
+
+# This file's purpose is to provide modules to be imported from here.
+# pylint: disable=unused-import
+
+# TOML support is an install-time extra option.
+try:
+    import toml
+except ImportError:         # pragma: not covered
+    toml = None
+
+
+@contextlib.contextmanager
+def without(modname):
+    """Hide a module for testing.
+
+    Use this in a test function to make an optional module unavailable during
+    the test::
+
+        with coverage.optional.without('toml'):
+            use_toml_somehow()
+
+    Arguments:
+        modname (str): the name of a module importable from
+            `coverage.optional`.
+
+    """
+    real_module = globals()[modname]
+    try:
+        globals()[modname] = None
+        yield
+    finally:
+        globals()[modname] = real_module
--- a/eric6/DebugClients/Python/coverage/parser.py	Wed Feb 19 19:38:36 2020 +0100
+++ b/eric6/DebugClients/Python/coverage/parser.py	Sat Feb 22 14:27:42 2020 +0100
@@ -1,5 +1,5 @@
 # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
 
 """Code parsing for coverage.py."""
 
@@ -13,7 +13,7 @@
 from coverage import env
 from coverage.backward import range    # pylint: disable=redefined-builtin
 from coverage.backward import bytes_to_ints, string_class
-from coverage.bytecode import CodeObjects
+from coverage.bytecode import code_objects
 from coverage.debug import short_stack
 from coverage.misc import contract, join_regex, new_contract, nice_pair, one_of
 from coverage.misc import NoSource, NotPython, StopEverything
@@ -207,7 +207,11 @@
 
     def first_line(self, line):
         """Return the first line number of the statement including `line`."""
-        return self._multiline.get(line, line)
+        if line < 0:
+            line = -self._multiline.get(-line, -line)
+        else:
+            line = self._multiline.get(line, line)
+        return line
 
     def first_lines(self, lines):
         """Map the line numbers in `lines` to the correct first line of the
@@ -383,8 +387,7 @@
         The iteration includes `self` as its first value.
 
         """
-        children = CodeObjects(self.code)
-        return (ByteParser(self.text, code=c) for c in children)
+        return (ByteParser(self.text, code=c) for c in code_objects(self.code))
 
     def _bytes_lines(self):
         """Map byte offsets to line numbers in `code`.
@@ -409,7 +412,7 @@
                     yield (byte_num, line_num)
                     last_line_num = line_num
                 byte_num += byte_incr
-            if env.PYVERSION >= (3, 6) and line_incr >= 0x80:
+            if env.PYBEHAVIOR.negative_lnotab and line_incr >= 0x80:
                 line_incr -= 0x100
             line_num += line_incr
         if line_num != last_line_num:
@@ -491,6 +494,7 @@
 
 
 # Turn on AST dumps with an environment variable.
+# $set_env.py: COVERAGE_AST_DUMP - Dump the AST nodes when parsing code.
 AST_DUMP = bool(int(os.environ.get("COVERAGE_AST_DUMP", 0)))
 
 class NodeList(object):
@@ -521,8 +525,8 @@
 
         if AST_DUMP:                                # pragma: debugging
             # Dump the AST so that failing tests have helpful output.
-            print("Statements: {0}".format(self.statements))
-            print("Multiline map: {0}".format(self.multiline))
+            print("Statements: {}".format(self.statements))
+            print("Multiline map: {}".format(self.multiline))
             ast_dump(self.root_node)
 
         self.arcs = set()
@@ -535,6 +539,7 @@
         self.missing_arc_fragments = collections.defaultdict(list)
         self.block_stack = []
 
+        # $set_env.py: COVERAGE_TRACK_ARCS - Trace every arc added while parsing code.
         self.debug = bool(int(os.environ.get("COVERAGE_TRACK_ARCS", 0)))
 
     def analyze(self):
@@ -652,7 +657,7 @@
             # to see if it's overlooked.
             if 0:
                 if node_name not in self.OK_TO_DEFAULT:
-                    print("*** Unhandled: {0}".format(node))
+                    print("*** Unhandled: {}".format(node))
 
             # Default for simple statements: one exit from this node.
             return set([ArcStart(self.line_for_node(node))])
@@ -822,7 +827,7 @@
                 for xit in exits:
                     self.add_arc(
                         xit.lineno, -block.start, xit.cause,
-                        "didn't except from function '{0}'".format(block.name),
+                        "didn't except from function {!r}".format(block.name),
                     )
                 break
 
@@ -837,7 +842,7 @@
                 for xit in exits:
                     self.add_arc(
                         xit.lineno, -block.start, xit.cause,
-                        "didn't return from function '{0}'".format(block.name),
+                        "didn't return from function {!r}".format(block.name),
                     )
                 break
 
@@ -1160,17 +1165,17 @@
         for xit in exits:
             self.add_arc(
                 xit.lineno, -start, xit.cause,
-                "didn't exit the body of class '{0}'".format(node.name),
+                "didn't exit the body of class {!r}".format(node.name),
             )
 
     def _make_oneline_code_method(noun):     # pylint: disable=no-self-argument
         """A function to make methods for online callable _code_object__ methods."""
         def _code_object__oneline_callable(self, node):
             start = self.line_for_node(node)
-            self.add_arc(-start, start, None, "didn't run the {0} on line {1}".format(noun, start))
+            self.add_arc(-start, start, None, "didn't run the {} on line {}".format(noun, start))
             self.add_arc(
                 start, -start, None,
-                "didn't finish the {0} on line {1}".format(noun, start),
+                "didn't finish the {} on line {}".format(noun, start),
             )
         return _code_object__oneline_callable
 
@@ -1202,15 +1207,15 @@
         """
         indent = " " * depth
         if not isinstance(node, ast.AST):
-            print("{0}<{1} {2!r}>".format(indent, node.__class__.__name__, node))
+            print("{}<{} {!r}>".format(indent, node.__class__.__name__, node))
             return
 
         lineno = getattr(node, "lineno", None)
         if lineno is not None:
-            linemark = " @ {0}".format(node.lineno)
+            linemark = " @ {}".format(node.lineno)
         else:
             linemark = ""
-        head = "{0}<{1}{2}".format(indent, node.__class__.__name__, linemark)
+        head = "{}<{}{}".format(indent, node.__class__.__name__, linemark)
 
         named_fields = [
             (name, value)
@@ -1218,28 +1223,28 @@
             if name not in SKIP_DUMP_FIELDS
         ]
         if not named_fields:
-            print("{0}>".format(head))
+            print("{}>".format(head))
         elif len(named_fields) == 1 and _is_simple_value(named_fields[0][1]):
             field_name, value = named_fields[0]
-            print("{0} {1}: {2!r}>".format(head, field_name, value))
+            print("{} {}: {!r}>".format(head, field_name, value))
         else:
             print(head)
             if 0:
-                print("{0}# mro: {1}".format(
+                print("{}# mro: {}".format(
                     indent, ", ".join(c.__name__ for c in node.__class__.__mro__[1:]),
                 ))
             next_indent = indent + "    "
             for field_name, value in named_fields:
-                prefix = "{0}{1}:".format(next_indent, field_name)
+                prefix = "{}{}:".format(next_indent, field_name)
                 if _is_simple_value(value):
-                    print("{0} {1!r}".format(prefix, value))
+                    print("{} {!r}".format(prefix, value))
                 elif isinstance(value, list):
-                    print("{0} [".format(prefix))
+                    print("{} [".format(prefix))
                     for n in value:
                         ast_dump(n, depth + 8)
-                    print("{0}]".format(next_indent))
+                    print("{}]".format(next_indent))
                 else:
                     print(prefix)
                     ast_dump(value, depth + 8)
 
-            print("{0}>".format(indent))
+            print("{}>".format(indent))
--- a/eric6/DebugClients/Python/coverage/phystokens.py	Wed Feb 19 19:38:36 2020 +0100
+++ b/eric6/DebugClients/Python/coverage/phystokens.py	Sat Feb 22 14:27:42 2020 +0100
@@ -1,5 +1,5 @@
 # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
 
 """Better tokenizing for coverage.py."""
 
@@ -27,7 +27,7 @@
     """
     last_line = None
     last_lineno = -1
-    last_ttype = None
+    last_ttext = None
     for ttype, ttext, (slineno, scol), (elineno, ecol), ltext in toks:
         if last_lineno != elineno:
             if last_line and last_line.endswith("\\\n"):
@@ -47,9 +47,7 @@
                 # so we need to figure out if the backslash is already in the
                 # string token or not.
                 inject_backslash = True
-                if last_ttype == tokenize.COMMENT:
-                    # Comments like this \
-                    # should never result in a new token.
+                if last_ttext.endswith("\\"):
                     inject_backslash = False
                 elif ttype == token.STRING:
                     if "\n" in ttext and ttext.split('\n', 1)[0][-1] == '\\':
@@ -66,7 +64,8 @@
                         last_line
                         )
             last_line = ltext
-            last_ttype = ttype
+        if ttype not in (tokenize.NEWLINE, tokenize.NL):
+            last_ttext = ttext
         yield ttype, ttext, (slineno, scol), (elineno, ecol), ltext
         last_lineno = elineno
 
--- a/eric6/DebugClients/Python/coverage/pickle2json.py	Wed Feb 19 19:38:36 2020 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,47 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
-
-"""Convert pickle to JSON for coverage.py."""
-
-from coverage.backward import pickle
-from coverage.data import CoverageData
-
-
-def pickle_read_raw_data(cls_unused, file_obj):
-    """Replacement for CoverageData._read_raw_data."""
-    return pickle.load(file_obj)
-
-
-def pickle2json(infile, outfile):
-    """Convert a coverage.py 3.x pickle data file to a 4.x JSON data file."""
-    try:
-        old_read_raw_data = CoverageData._read_raw_data
-        CoverageData._read_raw_data = pickle_read_raw_data
-
-        covdata = CoverageData()
-
-        with open(infile, 'rb') as inf:
-            covdata.read_fileobj(inf)
-
-        covdata.write_file(outfile)
-    finally:
-        CoverageData._read_raw_data = old_read_raw_data
-
-
-if __name__ == "__main__":
-    from optparse import OptionParser
-
-    parser = OptionParser(usage="usage: %s [options]" % __file__)
-    parser.description = "Convert .coverage files from pickle to JSON format"
-    parser.add_option(
-        "-i", "--input-file", action="store", default=".coverage",
-        help="Name of input file. Default .coverage",
-    )
-    parser.add_option(
-        "-o", "--output-file", action="store", default=".coverage",
-        help="Name of output file. Default .coverage",
-    )
-
-    (options, args) = parser.parse_args()
-
-    pickle2json(options.input_file, options.output_file)
--- a/eric6/DebugClients/Python/coverage/plugin.py	Wed Feb 19 19:38:36 2020 +0100
+++ b/eric6/DebugClients/Python/coverage/plugin.py	Sat Feb 22 14:27:42 2020 +0100
@@ -1,5 +1,5 @@
 # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
 
 """
 .. versionadded:: 4.0
@@ -14,6 +14,9 @@
 * Configurers add custom configuration, using Python code to change the
   configuration.
 
+* Dynamic context switchers decide when the dynamic context has changed, for
+  example, to record what test function produced the coverage.
+
 To write a coverage.py plug-in, create a module with a subclass of
 :class:`~coverage.CoveragePlugin`.  You will override methods in your class to
 participate in various aspects of coverage.py's processing.
@@ -54,6 +57,8 @@
     your importable Python package.
 
 
+.. _file_tracer_plugins:
+
 File Tracers
 ============
 
@@ -66,6 +71,8 @@
 register your file tracer.
 
 
+.. _configurer_plugins:
+
 Configurers
 ===========
 
@@ -78,6 +85,31 @@
 In your ``coverage_init`` function, use the ``add_configurer`` method to
 register your configurer.
 
+
+.. _dynamic_context_plugins:
+
+Dynamic Context Switchers
+=========================
+
+.. versionadded:: 5.0
+
+Dynamic context switcher plugins implement the
+:meth:`~coverage.CoveragePlugin.dynamic_context` method to dynamically compute
+the context label for each measured frame.
+
+Computed context labels are useful when you want to group measured data without
+modifying the source code.
+
+For example, you could write a plugin that checks `frame.f_code` to inspect
+the currently executed method, and set the context label to a fully qualified
+method name if it's an instance method of `unittest.TestCase` and the method
+name starts with 'test'.  Such a plugin would provide basic coverage grouping
+by test and could be used with test runners that have no built-in coveragepy
+support.
+
+In your ``coverage_init`` function, use the ``add_dynamic_context`` method to
+register your dynamic context switcher.
+
 """
 
 from coverage import files
@@ -94,13 +126,14 @@
 
         Every Python source file is offered to your plug-in to give it a chance
         to take responsibility for tracing the file.  If your plug-in can
-        handle the file, then return a :class:`FileTracer` object.  Otherwise
-        return None.
+        handle the file, it should return a :class:`FileTracer` object.
+        Otherwise return None.
 
         There is no way to register your plug-in for particular files.
-        Instead, this method is invoked for all files, and the plug-in decides
-        whether it can trace the file or not.  Be prepared for `filename` to
-        refer to all kinds of files that have nothing to do with your plug-in.
+        Instead, this method is invoked for all  files as they are executed,
+        and the plug-in decides whether it can trace the file or not.
+        Be prepared for `filename` to refer to all kinds of files that have
+        nothing to do with your plug-in.
 
         The file name will be a Python file being executed.  There are two
         broad categories of behavior for a plug-in, depending on the kind of
@@ -134,11 +167,28 @@
         This will only be invoked if `filename` returns non-None from
         :meth:`file_tracer`.  It's an error to return None from this method.
 
-        Returns a :class:`FileReporter` object to use to report on `filename`.
+        Returns a :class:`FileReporter` object to use to report on `filename`,
+        or the string `"python"` to have coverage.py treat the file as Python.
 
         """
         _needs_to_implement(self, "file_reporter")
 
+    def dynamic_context(self, frame):       # pylint: disable=unused-argument
+        """Get the dynamically computed context label for `frame`.
+
+        Plug-in type: dynamic context.
+
+        This method is invoked for each frame when outside of a dynamic
+        context, to see if a new dynamic context should be started.  If it
+        returns a string, a new context label is set for this and deeper
+        frames.  The dynamic context ends when this frame returns.
+
+        Returns a string to start a new dynamic context, or None if no new
+        context should be started.
+
+        """
+        return None
+
     def find_executable_files(self, src_dir):       # pylint: disable=unused-argument
         """Yield all of the executable files in `src_dir`, recursively.
 
--- a/eric6/DebugClients/Python/coverage/plugin_support.py	Wed Feb 19 19:38:36 2020 +0100
+++ b/eric6/DebugClients/Python/coverage/plugin_support.py	Sat Feb 22 14:27:42 2020 +0100
@@ -1,5 +1,5 @@
 # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
 
 """Support for plugins."""
 
@@ -21,6 +21,7 @@
         self.names = {}
         self.file_tracers = []
         self.configurers = []
+        self.context_switchers = []
 
         self.current_module = None
         self.debug = None
@@ -70,6 +71,15 @@
         """
         self._add_plugin(plugin, self.configurers)
 
+    def add_dynamic_context(self, plugin):
+        """Add a dynamic context plugin.
+
+        `plugin` is an instance of a third-party plugin class.  It must
+        implement the :meth:`CoveragePlugin.dynamic_context` method.
+
+        """
+        self._add_plugin(plugin, self.context_switchers)
+
     def add_noop(self, plugin):
         """Add a plugin that does nothing.
 
@@ -157,6 +167,20 @@
             reporter = DebugFileReporterWrapper(filename, reporter, debug)
         return reporter
 
+    def dynamic_context(self, frame):
+        context = self.plugin.dynamic_context(frame)
+        self.debug.write("dynamic_context(%r) --> %r" % (frame, context))
+        return context
+
+    def find_executable_files(self, src_dir):
+        executable_files = self.plugin.find_executable_files(src_dir)
+        self.debug.write("find_executable_files(%r) --> %r" % (src_dir, executable_files))
+        return executable_files
+
+    def configure(self, config):
+        self.debug.write("configure(%r)" % (config,))
+        self.plugin.configure(config)
+
     def sys_info(self):
         return self.plugin.sys_info()
 
--- a/eric6/DebugClients/Python/coverage/python.py	Wed Feb 19 19:38:36 2020 +0100
+++ b/eric6/DebugClients/Python/coverage/python.py	Sat Feb 22 14:27:42 2020 +0100
@@ -1,5 +1,5 @@
 # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
 
 """Python source expertise for coverage.py"""
 
@@ -97,7 +97,7 @@
 
 
 def source_for_file(filename):
-    """Return the source file for `filename`.
+    """Return the source filename for `filename`.
 
     Given a file name being traced, return the best guess as to the source
     file to attribute it to.
@@ -129,22 +129,28 @@
     return filename
 
 
+def source_for_morf(morf):
+    """Get the source filename for the module-or-file `morf`."""
+    if hasattr(morf, '__file__') and morf.__file__:
+        filename = morf.__file__
+    elif isinstance(morf, types.ModuleType):
+        # A module should have had .__file__, otherwise we can't use it.
+        # This could be a PEP-420 namespace package.
+        raise CoverageException("Module {} has no file".format(morf))
+    else:
+        filename = morf
+
+    filename = source_for_file(files.unicode_filename(filename))
+    return filename
+
+
 class PythonFileReporter(FileReporter):
     """Report support for a Python file."""
 
     def __init__(self, morf, coverage=None):
         self.coverage = coverage
 
-        if hasattr(morf, '__file__') and morf.__file__:
-            filename = morf.__file__
-        elif isinstance(morf, types.ModuleType):
-            # A module should have had .__file__, otherwise we can't use it.
-            # This could be a PEP-420 namespace package.
-            raise CoverageException("Module {0} has no file".format(morf))
-        else:
-            filename = morf
-
-        filename = source_for_file(files.unicode_filename(filename))
+        filename = source_for_morf(morf)
 
         super(PythonFileReporter, self).__init__(files.canonical_filename(filename))
 
@@ -160,11 +166,10 @@
 
         self._source = None
         self._parser = None
-        self._statements = None
         self._excluded = None
 
     def __repr__(self):
-        return "<PythonFileReporter {0!r}>".format(self.filename)
+        return "<PythonFileReporter {!r}>".format(self.filename)
 
     @contract(returns='unicode')
     def relative_filename(self):
--- a/eric6/DebugClients/Python/coverage/pytracer.py	Wed Feb 19 19:38:36 2020 +0100
+++ b/eric6/DebugClients/Python/coverage/pytracer.py	Sat Feb 22 14:27:42 2020 +0100
@@ -1,5 +1,5 @@
 # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
 
 """Raw data collector for coverage.py."""
 
@@ -40,6 +40,7 @@
         self.trace_arcs = False
         self.should_trace = None
         self.should_trace_cache = None
+        self.should_start_context = None
         self.warn = None
         # The threading module to use, if any.
         self.threading = None
@@ -47,6 +48,8 @@
         self.cur_file_dict = None
         self.last_line = 0          # int, but uninitialized.
         self.cur_file_name = None
+        self.context = None
+        self.started_context = False
 
         self.data_stack = []
         self.last_exc_back = None
@@ -60,7 +63,7 @@
         atexit.register(setattr, self, 'in_atexit', True)
 
     def __repr__(self):
-        return "<PyTracer at {0}: {1} lines in {2} files>".format(
+        return "<PyTracer at {}: {} lines in {} files>".format(
             id(self),
             sum(len(v) for v in self.data.values()),
             len(self.data),
@@ -83,7 +86,7 @@
 
         #self.log(":", frame.f_code.co_filename, frame.f_lineno, event)
 
-        if (self.stopped and sys.gettrace() == self._trace):
+        if (self.stopped and sys.gettrace() == self._trace):    # pylint: disable=comparison-with-callable
             # The PyTrace.stop() method has been called, possibly by another
             # thread, let's deactivate ourselves now.
             #self.log("X", frame.f_code.co_filename, frame.f_lineno)
@@ -96,14 +99,35 @@
                 if self.trace_arcs and self.cur_file_dict:
                     pair = (self.last_line, -self.last_exc_firstlineno)
                     self.cur_file_dict[pair] = None
-                self.cur_file_dict, self.cur_file_name, self.last_line = self.data_stack.pop()
+                self.cur_file_dict, self.cur_file_name, self.last_line, self.started_context = (
+                    self.data_stack.pop()
+                )
             self.last_exc_back = None
 
         if event == 'call':
-            # Entering a new function context.  Decide if we should trace
+            # Should we start a new context?
+            if self.should_start_context and self.context is None:
+                context_maybe = self.should_start_context(frame)
+                if context_maybe is not None:
+                    self.context = context_maybe
+                    self.started_context = True
+                    self.switch_context(self.context)
+                else:
+                    self.started_context = False
+            else:
+                self.started_context = False
+
+            # Entering a new frame.  Decide if we should trace
             # in this file.
             self._activity = True
-            self.data_stack.append((self.cur_file_dict, self.cur_file_name, self.last_line))
+            self.data_stack.append(
+                (
+                    self.cur_file_dict,
+                    self.cur_file_name,
+                    self.last_line,
+                    self.started_context,
+                )
+            )
             filename = frame.f_code.co_filename
             self.cur_file_name = filename
             disp = self.should_trace_cache.get(filename)
@@ -146,7 +170,13 @@
                     first = frame.f_code.co_firstlineno
                     self.cur_file_dict[(self.last_line, -first)] = None
             # Leaving this function, pop the filename stack.
-            self.cur_file_dict, self.cur_file_name, self.last_line = self.data_stack.pop()
+            self.cur_file_dict, self.cur_file_name, self.last_line, self.started_context = (
+                self.data_stack.pop()
+            )
+            # Leaving a context?
+            if self.started_context:
+                self.context = None
+                self.switch_context(None)
         elif event == 'exception':
             self.last_exc_back = frame.f_back
             self.last_exc_firstlineno = frame.f_code.co_firstlineno
@@ -175,7 +205,7 @@
 
     def stop(self):
         """Stop this Tracer."""
-        # Get the activate tracer callback before setting the stop flag to be
+        # Get the active tracer callback before setting the stop flag to be
         # able to detect if the tracer was changed prior to stopping it.
         tf = sys.gettrace()
 
@@ -196,7 +226,7 @@
             # so don't warn if we are in atexit on PyPy and the trace function
             # has changed to None.
             dont_warn = (env.PYPY and env.PYPYVERSION >= (5, 4) and self.in_atexit and tf is None)
-            if (not dont_warn) and tf != self._trace:
+            if (not dont_warn) and tf != self._trace:   # pylint: disable=comparison-with-callable
                 self.warn(
                     "Trace function changed, measurement is likely wrong: %r" % (tf,),
                     slug="trace-changed",
--- a/eric6/DebugClients/Python/coverage/report.py	Wed Feb 19 19:38:36 2020 +0100
+++ b/eric6/DebugClients/Python/coverage/report.py	Sat Feb 22 14:27:42 2020 +0100
@@ -1,104 +1,86 @@
 # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
 
 """Reporter foundation for coverage.py."""
-
-import os
-import warnings
+import sys
 
+from coverage import env
 from coverage.files import prep_patterns, FnmatchMatcher
-from coverage.misc import CoverageException, NoSource, NotPython, isolate_module
-
-os = isolate_module(os)
+from coverage.misc import CoverageException, NoSource, NotPython, ensure_dir_for_file, file_be_gone
 
 
-class Reporter(object):
-    """A base class for all reporters."""
-
-    def __init__(self, coverage, config):
-        """Create a reporter.
-
-        `coverage` is the coverage instance. `config` is an instance  of
-        CoverageConfig, for controlling all sorts of behavior.
-
-        """
-        self.coverage = coverage
-        self.config = config
-
-        # The directory into which to place the report, used by some derived
-        # classes.
-        self.directory = None
+def render_report(output_path, reporter, morfs):
+    """Run the provided reporter ensuring any required setup and cleanup is done
 
-        # Our method find_file_reporters used to set an attribute that other
-        # code could read.  That's been refactored away, but some third parties
-        # were using that attribute.  We'll continue to support it in a noisy
-        # way for now.
-        self._file_reporters = []
+    At a high level this method ensures the output file is ready to be written to. Then writes the
+    report to it. Then closes the file and deletes any garbage created if necessary.
+    """
+    file_to_close = None
+    delete_file = False
+    if output_path:
+        if output_path == '-':
+            outfile = sys.stdout
+        else:
+            # Ensure that the output directory is created; done here
+            # because this report pre-opens the output file.
+            # HTMLReport does this using the Report plumbing because
+            # its task is more complex, being multiple files.
+            ensure_dir_for_file(output_path)
+            open_kwargs = {}
+            if env.PY3:
+                open_kwargs['encoding'] = 'utf8'
+            outfile = open(output_path, "w", **open_kwargs)
+            file_to_close = outfile
+    try:
+        return reporter.report(morfs, outfile=outfile)
+    except CoverageException:
+        delete_file = True
+        raise
+    finally:
+        if file_to_close:
+            file_to_close.close()
+            if delete_file:
+                file_be_gone(output_path)
 
-    @property
-    def file_reporters(self):
-        """Keep .file_reporters working for private-grabbing tools."""
-        warnings.warn(
-            "Report.file_reporters will no longer be available in Coverage.py 4.2",
-            DeprecationWarning,
-        )
-        return self._file_reporters
 
-    def find_file_reporters(self, morfs):
-        """Find the FileReporters we'll report on.
-
-        `morfs` is a list of modules or file names.
-
-        Returns a list of FileReporters.
-
-        """
-        reporters = self.coverage._get_file_reporters(morfs)
+def get_analysis_to_report(coverage, morfs):
+    """Get the files to report on.
 
-        if self.config.report_include:
-            matcher = FnmatchMatcher(prep_patterns(self.config.report_include))
-            reporters = [fr for fr in reporters if matcher.match(fr.filename)]
+    For each morf in `morfs`, if it should be reported on (based on the omit
+    and include configuration options), yield a pair, the `FileReporter` and
+    `Analysis` for the morf.
 
-        if self.config.report_omit:
-            matcher = FnmatchMatcher(prep_patterns(self.config.report_omit))
-            reporters = [fr for fr in reporters if not matcher.match(fr.filename)]
-
-        self._file_reporters = sorted(reporters)
-        return self._file_reporters
+    """
+    file_reporters = coverage._get_file_reporters(morfs)
+    config = coverage.config
 
-    def report_files(self, report_fn, morfs, directory=None):
-        """Run a reporting function on a number of morfs.
+    if config.report_include:
+        matcher = FnmatchMatcher(prep_patterns(config.report_include))
+        file_reporters = [fr for fr in file_reporters if matcher.match(fr.filename)]
 
-        `report_fn` is called for each relative morf in `morfs`.  It is called
-        as::
+    if config.report_omit:
+        matcher = FnmatchMatcher(prep_patterns(config.report_omit))
+        file_reporters = [fr for fr in file_reporters if not matcher.match(fr.filename)]
 
-            report_fn(file_reporter, analysis)
-
-        where `file_reporter` is the `FileReporter` for the morf, and
-        `analysis` is the `Analysis` for the morf.
+    if not file_reporters:
+        raise CoverageException("No data to report.")
 
-        """
-        file_reporters = self.find_file_reporters(morfs)
-
-        if not file_reporters:
-            raise CoverageException("No data to report.")
-
-        self.directory = directory
-        if self.directory and not os.path.exists(self.directory):
-            os.makedirs(self.directory)
-
-        for fr in file_reporters:
-            try:
-                report_fn(fr, self.coverage._analyze(fr))
-            except NoSource:
-                if not self.config.ignore_errors:
+    for fr in sorted(file_reporters):
+        try:
+            analysis = coverage._analyze(fr)
+        except NoSource:
+            if not config.ignore_errors:
+                raise
+        except NotPython:
+            # Only report errors for .py files, and only if we didn't
+            # explicitly suppress those errors.
+            # NotPython is only raised by PythonFileReporter, which has a
+            # should_be_python() method.
+            if fr.should_be_python():
+                if config.ignore_errors:
+                    msg = "Couldn't parse Python file '{}'".format(fr.filename)
+                    coverage._warn(msg, slug="couldnt-parse")
+                else:
                     raise
-            except NotPython:
-                # Only report errors for .py files, and only if we didn't
-                # explicitly suppress those errors.
-                # NotPython is only raised by PythonFileReporter, which has a
-                # should_be_python() method.
-                if fr.should_be_python():
-                    if self.config.ignore_errors:
-                        self.coverage._warn("Could not parse Python file {0}".format(fr.filename))
-                    else:
-                        raise
+        else:
+            yield (fr, analysis)
--- a/eric6/DebugClients/Python/coverage/results.py	Wed Feb 19 19:38:36 2020 +0100
+++ b/eric6/DebugClients/Python/coverage/results.py	Sat Feb 22 14:27:42 2020 +0100
@@ -1,34 +1,36 @@
 # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
 
 """Results of coverage measurement."""
 
 import collections
 
 from coverage.backward import iitems
-from coverage.misc import contract, format_lines, SimpleRepr
+from coverage.debug import SimpleReprMixin
+from coverage.misc import contract, CoverageException, nice_pair
 
 
 class Analysis(object):
     """The results of analyzing a FileReporter."""
 
-    def __init__(self, data, file_reporter):
+    def __init__(self, data, file_reporter, file_mapper):
         self.data = data
         self.file_reporter = file_reporter
-        self.filename = self.file_reporter.filename
+        self.filename = file_mapper(self.file_reporter.filename)
         self.statements = self.file_reporter.lines()
         self.excluded = self.file_reporter.excluded_lines()
 
         # Identify missing statements.
         executed = self.data.lines(self.filename) or []
         executed = self.file_reporter.translate_lines(executed)
-        self.missing = self.statements - executed
+        self.executed = executed
+        self.missing = self.statements - self.executed
 
         if self.data.has_arcs():
             self._arc_possibilities = sorted(self.file_reporter.arcs())
             self.exit_counts = self.file_reporter.exit_counts()
             self.no_branch = self.file_reporter.no_branch_lines()
-            n_branches = self.total_branches()
+            n_branches = self._total_branches()
             mba = self.missing_branch_arcs()
             n_partial_branches = sum(len(v) for k,v in iitems(mba) if k not in self.missing)
             n_missing_branches = sum(len(v) for k,v in iitems(mba))
@@ -48,28 +50,38 @@
             n_missing_branches=n_missing_branches,
         )
 
-    def missing_formatted(self):
+    def missing_formatted(self, branches=False):
         """The missing line numbers, formatted nicely.
 
         Returns a string like "1-2, 5-11, 13-14".
 
+        If `branches` is true, includes the missing branch arcs also.
+
         """
-        return format_lines(self.statements, self.missing)
+        if branches and self.has_arcs():
+            arcs = iitems(self.missing_branch_arcs())
+        else:
+            arcs = None
+
+        return format_lines(self.statements, self.missing, arcs=arcs)
 
     def has_arcs(self):
         """Were arcs measured in this result?"""
         return self.data.has_arcs()
 
+    @contract(returns='list(tuple(int, int))')
     def arc_possibilities(self):
         """Returns a sorted list of the arcs in the code."""
         return self._arc_possibilities
 
+    @contract(returns='list(tuple(int, int))')
     def arcs_executed(self):
         """Returns a sorted list of the arcs actually executed in the code."""
         executed = self.data.arcs(self.filename) or []
         executed = self.file_reporter.translate_arcs(executed)
         return sorted(executed)
 
+    @contract(returns='list(tuple(int, int))')
     def arcs_missing(self):
         """Returns a sorted list of the arcs in the code not executed."""
         possible = self.arc_possibilities()
@@ -81,24 +93,7 @@
         )
         return sorted(missing)
 
-    def arcs_missing_formatted(self):
-        """The missing branch arcs, formatted nicely.
-
-        Returns a string like "1->2, 1->3, 16->20". Omits any mention of
-        branches from missing lines, so if line 17 is missing, then 17->18
-        won't be included.
-
-        """
-        arcs = self.missing_branch_arcs()
-        missing = self.missing
-        line_exits = sorted(iitems(arcs))
-        pairs = []
-        for line, exits in line_exits:
-            for ex in sorted(exits):
-                if line not in missing:
-                    pairs.append("%d->%s" % (line, (ex if ex > 0 else "exit")))
-        return ', '.join(pairs)
-
+    @contract(returns='list(tuple(int, int))')
     def arcs_unpredicted(self):
         """Returns a sorted list of the executed arcs missing from the code."""
         possible = self.arc_possibilities()
@@ -116,14 +111,15 @@
         )
         return sorted(unpredicted)
 
-    def branch_lines(self):
+    def _branch_lines(self):
         """Returns a list of line numbers that have more than one exit."""
         return [l1 for l1,count in iitems(self.exit_counts) if count > 1]
 
-    def total_branches(self):
+    def _total_branches(self):
         """How many total branches are there?"""
         return sum(count for count in self.exit_counts.values() if count > 1)
 
+    @contract(returns='dict(int: list(int))')
     def missing_branch_arcs(self):
         """Return arcs that weren't executed from branch lines.
 
@@ -131,13 +127,14 @@
 
         """
         missing = self.arcs_missing()
-        branch_lines = set(self.branch_lines())
+        branch_lines = set(self._branch_lines())
         mba = collections.defaultdict(list)
         for l1, l2 in missing:
             if l1 in branch_lines:
                 mba[l1].append(l2)
         return mba
 
+    @contract(returns='dict(int: tuple(int, int))')
     def branch_stats(self):
         """Get stats about branches.
 
@@ -147,7 +144,7 @@
 
         missing_arcs = self.missing_branch_arcs()
         stats = {}
-        for lnum in self.branch_lines():
+        for lnum in self._branch_lines():
             exits = self.exit_counts[lnum]
             try:
                 missing = len(missing_arcs[lnum])
@@ -157,7 +154,7 @@
         return stats
 
 
-class Numbers(SimpleRepr):
+class Numbers(SimpleReprMixin):
     """The numerical results of measuring coverage.
 
     This holds the basic statistics from `Analysis`, and is used to roll
@@ -271,6 +268,61 @@
         return NotImplemented
 
 
+def _line_ranges(statements, lines):
+    """Produce a list of ranges for `format_lines`."""
+    statements = sorted(statements)
+    lines = sorted(lines)
+
+    pairs = []
+    start = None
+    lidx = 0
+    for stmt in statements:
+        if lidx >= len(lines):
+            break
+        if stmt == lines[lidx]:
+            lidx += 1
+            if not start:
+                start = stmt
+            end = stmt
+        elif start:
+            pairs.append((start, end))
+            start = None
+    if start:
+        pairs.append((start, end))
+    return pairs
+
+
+def format_lines(statements, lines, arcs=None):
+    """Nicely format a list of line numbers.
+
+    Format a list of line numbers for printing by coalescing groups of lines as
+    long as the lines represent consecutive statements.  This will coalesce
+    even if there are gaps between statements.
+
+    For example, if `statements` is [1,2,3,4,5,10,11,12,13,14] and
+    `lines` is [1,2,5,10,11,13,14] then the result will be "1-2, 5-11, 13-14".
+
+    Both `lines` and `statements` can be any iterable. All of the elements of
+    `lines` must be in `statements`, and all of the values must be positive
+    integers.
+
+    If `arcs` is provided, they are (start,[end,end,end]) pairs that will be
+    included in the output as long as start isn't in `lines`.
+
+    """
+    line_items = [(pair[0], nice_pair(pair)) for pair in _line_ranges(statements, lines)]
+    if arcs:
+        line_exits = sorted(arcs)
+        for line, exits in line_exits:
+            for ex in sorted(exits):
+                if line not in lines:
+                    dest = (ex if ex > 0 else "exit")
+                    line_items.append((line, "%d->%s" % (line, dest)))
+
+    ret = ', '.join(t[-1] for t in sorted(line_items))
+    return ret
+
+
 @contract(total='number', fail_under='number', precision=int, returns=bool)
 def should_fail_under(total, fail_under, precision):
     """Determine if a total should fail due to fail-under.
@@ -282,6 +334,11 @@
     Returns True if the total should fail.
 
     """
+    # We can never achieve higher than 100% coverage, or less than zero.
+    if not (0 <= fail_under <= 100.0):
+        msg = "fail_under={} is invalid. Must be between 0 and 100.".format(fail_under)
+        raise CoverageException(msg)
+
     # Special case for fail_under=100, it must really be 100.
     if fail_under == 100.0 and total != 100.0:
         return True
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/eric6/DebugClients/Python/coverage/sqldata.py	Sat Feb 22 14:27:42 2020 +0100
@@ -0,0 +1,1087 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
+
+"""Sqlite coverage data."""
+
+# TODO: factor out dataop debugging to a wrapper class?
+# TODO: make sure all dataop debugging is in place somehow
+
+import collections
+import datetime
+import glob
+import itertools
+import os
+import re
+import sqlite3
+import sys
+import zlib
+
+from coverage import env
+from coverage.backward import get_thread_id, iitems, to_bytes, to_string
+from coverage.debug import NoDebugging, SimpleReprMixin, clipped_repr
+from coverage.files import PathAliases
+from coverage.misc import CoverageException, contract, file_be_gone, filename_suffix, isolate_module
+from coverage.numbits import numbits_to_nums, numbits_union, nums_to_numbits
+from coverage.version import __version__
+
+os = isolate_module(os)
+
+# If you change the schema, increment the SCHEMA_VERSION, and update the
+# docs in docs/dbschema.rst also.
+
+SCHEMA_VERSION = 7
+
+# Schema versions:
+# 1: Released in 5.0a2
+# 2: Added contexts in 5.0a3.
+# 3: Replaced line table with line_map table.
+# 4: Changed line_map.bitmap to line_map.numbits.
+# 5: Added foreign key declarations.
+# 6: Key-value in meta.
+# 7: line_map -> line_bits
+
+SCHEMA = """\
+CREATE TABLE coverage_schema (
+    -- One row, to record the version of the schema in this db.
+    version integer
+);
+
+CREATE TABLE meta (
+    -- Key-value pairs, to record metadata about the data
+    key text,
+    value text,
+    unique (key)
+    -- Keys:
+    --  'has_arcs' boolean      -- Is this data recording branches?
+    --  'sys_argv' text         -- The coverage command line that recorded the data.
+    --  'version' text          -- The version of coverage.py that made the file.
+    --  'when' text             -- Datetime when the file was created.
+);
+
+CREATE TABLE file (
+    -- A row per file measured.
+    id integer primary key,
+    path text,
+    unique (path)
+);
+
+CREATE TABLE context (
+    -- A row per context measured.
+    id integer primary key,
+    context text,
+    unique (context)
+);
+
+CREATE TABLE line_bits (
+    -- If recording lines, a row per context per file executed.
+    -- All of the line numbers for that file/context are in one numbits.
+    file_id integer,            -- foreign key to `file`.
+    context_id integer,         -- foreign key to `context`.
+    numbits blob,               -- see the numbits functions in coverage.numbits
+    foreign key (file_id) references file (id),
+    foreign key (context_id) references context (id),
+    unique (file_id, context_id)
+);
+
+CREATE TABLE arc (
+    -- If recording branches, a row per context per from/to line transition executed.
+    file_id integer,            -- foreign key to `file`.
+    context_id integer,         -- foreign key to `context`.
+    fromno integer,             -- line number jumped from.
+    tono integer,               -- line number jumped to.
+    foreign key (file_id) references file (id),
+    foreign key (context_id) references context (id),
+    unique (file_id, context_id, fromno, tono)
+);
+
+CREATE TABLE tracer (
+    -- A row per file indicating the tracer used for that file.
+    file_id integer primary key,
+    tracer text,
+    foreign key (file_id) references file (id)
+);
+"""
+
+class CoverageData(SimpleReprMixin):
+    """Manages collected coverage data, including file storage.
+
+    This class is the public supported API to the data that coverage.py
+    collects during program execution.  It includes information about what code
+    was executed. It does not include information from the analysis phase, to
+    determine what lines could have been executed, or what lines were not
+    executed.
+
+    .. note::
+
+        The data file is currently a SQLite database file, with a
+        :ref:`documented schema <dbschema>`. The schema is subject to change
+        though, so be careful about querying it directly. Use this API if you
+        can to isolate yourself from changes.
+
+    There are a number of kinds of data that can be collected:
+
+    * **lines**: the line numbers of source lines that were executed.
+      These are always available.
+
+    * **arcs**: pairs of source and destination line numbers for transitions
+      between source lines.  These are only available if branch coverage was
+      used.
+
+    * **file tracer names**: the module names of the file tracer plugins that
+      handled each file in the data.
+
+    Lines, arcs, and file tracer names are stored for each source file. File
+    names in this API are case-sensitive, even on platforms with
+    case-insensitive file systems.
+
+    A data file either stores lines, or arcs, but not both.
+
+    A data file is associated with the data when the :class:`CoverageData`
+    is created, using the parameters `basename`, `suffix`, and `no_disk`. The
+    base name can be queried with :meth:`base_filename`, and the actual file
+    name being used is available from :meth:`data_filename`.
+
+    To read an existing coverage.py data file, use :meth:`read`.  You can then
+    access the line, arc, or file tracer data with :meth:`lines`, :meth:`arcs`,
+    or :meth:`file_tracer`.
+
+    The :meth:`has_arcs` method indicates whether arc data is available.  You
+    can get a set of the files in the data with :meth:`measured_files`.  As
+    with most Python containers, you can determine if there is any data at all
+    by using this object as a boolean value.
+
+    The contexts for each line in a file can be read with
+    :meth:`contexts_by_lineno`.
+
+    To limit querying to certain contexts, use :meth:`set_query_context` or
+    :meth:`set_query_contexts`. These will narrow the focus of subsequent
+    :meth:`lines`, :meth:`arcs`, and :meth:`contexts_by_lineno` calls. The set
+    of all measured context names can be retrieved with
+    :meth:`measured_contexts`.
+
+    Most data files will be created by coverage.py itself, but you can use
+    methods here to create data files if you like.  The :meth:`add_lines`,
+    :meth:`add_arcs`, and :meth:`add_file_tracers` methods add data, in ways
+    that are convenient for coverage.py.
+
+    To record data for contexts, use :meth:`set_context` to set a context to
+    be used for subsequent :meth:`add_lines` and :meth:`add_arcs` calls.
+
+    To add a source file without any measured data, use :meth:`touch_file`.
+
+    Write the data to its file with :meth:`write`.
+
+    You can clear the data in memory with :meth:`erase`.  Two data collections
+    can be combined by using :meth:`update` on one :class:`CoverageData`,
+    passing it the other.
+
+    Data in a :class:`CoverageData` can be serialized and deserialized with
+    :meth:`dumps` and :meth:`loads`.
+
+    """
+
+    def __init__(self, basename=None, suffix=None, no_disk=False, warn=None, debug=None):
+        """Create a :class:`CoverageData` object to hold coverage-measured data.
+
+        Arguments:
+            basename (str): the base name of the data file, defaulting to
+                ".coverage".
+            suffix (str or bool): has the same meaning as the `data_suffix`
+                argument to :class:`coverage.Coverage`.
+            no_disk (bool): if True, keep all data in memory, and don't
+                write any disk file.
+            warn: a warning callback function, accepting a warning message
+                argument.
+            debug: a `DebugControl` object (optional)
+
+        """
+        self._no_disk = no_disk
+        self._basename = os.path.abspath(basename or ".coverage")
+        self._suffix = suffix
+        self._warn = warn
+        self._debug = debug or NoDebugging()
+
+        self._choose_filename()
+        self._file_map = {}
+        # Maps thread ids to SqliteDb objects.
+        self._dbs = {}
+        self._pid = os.getpid()
+
+        # Are we in sync with the data file?
+        self._have_used = False
+
+        self._has_lines = False
+        self._has_arcs = False
+
+        self._current_context = None
+        self._current_context_id = None
+        self._query_context_ids = None
+
+    def _choose_filename(self):
+        """Set self._filename based on inited attributes."""
+        if self._no_disk:
+            self._filename = ":memory:"
+        else:
+            self._filename = self._basename
+            suffix = filename_suffix(self._suffix)
+            if suffix:
+                self._filename += "." + suffix
+
+    def _reset(self):
+        """Reset our attributes."""
+        if self._dbs:
+            for db in self._dbs.values():
+                db.close()
+        self._dbs = {}
+        self._file_map = {}
+        self._have_used = False
+        self._current_context_id = None
+
+    def _create_db(self):
+        """Create a db file that doesn't exist yet.
+
+        Initializes the schema and certain metadata.
+        """
+        if self._debug.should('dataio'):
+            self._debug.write("Creating data file {!r}".format(self._filename))
+        self._dbs[get_thread_id()] = db = SqliteDb(self._filename, self._debug)
+        with db:
+            db.executescript(SCHEMA)
+            db.execute("insert into coverage_schema (version) values (?)", (SCHEMA_VERSION,))
+            db.executemany(
+                "insert into meta (key, value) values (?, ?)",
+                [
+                    ('sys_argv', str(getattr(sys, 'argv', None))),
+                    ('version', __version__),
+                    ('when', datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')),
+                ]
+            )
+
+    def _open_db(self):
+        """Open an existing db file, and read its metadata."""
+        if self._debug.should('dataio'):
+            self._debug.write("Opening data file {!r}".format(self._filename))
+        self._dbs[get_thread_id()] = SqliteDb(self._filename, self._debug)
+        self._read_db()
+
+    def _read_db(self):
+        """Read the metadata from a database so that we are ready to use it."""
+        with self._dbs[get_thread_id()] as db:
+            try:
+                schema_version, = db.execute("select version from coverage_schema").fetchone()
+            except Exception as exc:
+                raise CoverageException(
+                    "Data file {!r} doesn't seem to be a coverage data file: {}".format(
+                        self._filename, exc
+                    )
+                )
+            else:
+                if schema_version != SCHEMA_VERSION:
+                    raise CoverageException(
+                        "Couldn't use data file {!r}: wrong schema: {} instead of {}".format(
+                            self._filename, schema_version, SCHEMA_VERSION
+                        )
+                    )
+
+            for row in db.execute("select value from meta where key = 'has_arcs'"):
+                self._has_arcs = bool(int(row[0]))
+                self._has_lines = not self._has_arcs
+
+            for path, file_id in db.execute("select path, id from file"):
+                self._file_map[path] = file_id
+
+    def _connect(self):
+        """Get the SqliteDb object to use."""
+        if get_thread_id() not in self._dbs:
+            if os.path.exists(self._filename):
+                self._open_db()
+            else:
+                self._create_db()
+        return self._dbs[get_thread_id()]
+
+    def __nonzero__(self):
+        if (get_thread_id() not in self._dbs and not os.path.exists(self._filename)):
+            return False
+        try:
+            with self._connect() as con:
+                rows = con.execute("select * from file limit 1")
+                return bool(list(rows))
+        except CoverageException:
+            return False
+
+    __bool__ = __nonzero__
+
+    @contract(returns='bytes')
+    def dumps(self):
+        """Serialize the current data to a byte string.
+
+        The format of the serialized data is not documented. It is only
+        suitable for use with :meth:`loads` in the same version of
+        coverage.py.
+
+        Returns:
+            A byte string of serialized data.
+
+        .. versionadded:: 5.0
+
+        """
+        if self._debug.should('dataio'):
+            self._debug.write("Dumping data from data file {!r}".format(self._filename))
+        with self._connect() as con:
+            # Leading b'z' is a format marker meaning "zlib-compressed SQL
+            # dump"; loads() checks for it before decompressing.
+            return b'z' + zlib.compress(to_bytes(con.dump()))
+
+    @contract(data='bytes')
+    def loads(self, data):
+        """Deserialize data from :meth:`dumps`
+
+        Use with a newly-created empty :class:`CoverageData` object.  It's
+        undefined what happens if the object already has data in it.
+
+        Arguments:
+            data: A byte string of serialized data produced by :meth:`dumps`.
+
+        Raises:
+            CoverageException: if `data` doesn't start with the b'z' marker.
+
+        .. versionadded:: 5.0
+
+        """
+        if self._debug.should('dataio'):
+            self._debug.write("Loading data into data file {!r}".format(self._filename))
+        # Check the one-byte format marker written by dumps().
+        if data[:1] != b'z':
+            raise CoverageException(
+                "Unrecognized serialization: {!r} (head of {} bytes)".format(data[:40], len(data))
+                )
+        # The payload is a zlib-compressed SQL script: decompress and replay
+        # it into a fresh connection for this thread.
+        script = to_string(zlib.decompress(data[1:]))
+        self._dbs[get_thread_id()] = db = SqliteDb(self._filename, self._debug)
+        with db:
+            db.executescript(script)
+        # Refresh the in-memory caches (file map, has_arcs, etc.).
+        self._read_db()
+        self._have_used = True
+
+    def _file_id(self, filename, add=False):
+        """Get the file id for `filename`.
+
+        If filename is not in the database yet, add it if `add` is True.
+        If `add` is not True, return None.
+        """
+        # self._file_map caches path -> row id so repeated lookups avoid SQL.
+        if filename not in self._file_map:
+            if add:
+                with self._connect() as con:
+                    cur = con.execute("insert or replace into file (path) values (?)", (filename,))
+                    self._file_map[filename] = cur.lastrowid
+        # .get() yields None for unknown files when add is False.
+        return self._file_map.get(filename)
+
+    def _context_id(self, context):
+        """Get the id for a context, or None if it isn't in the database yet."""
+        assert context is not None
+        self._start_using()
+        with self._connect() as con:
+            row = con.execute("select id from context where context = ?", (context,)).fetchone()
+            if row is not None:
+                return row[0]
+            else:
+                return None
+
+    def set_context(self, context):
+        """Set the current context for future :meth:`add_lines` etc.
+
+        `context` is a str, the name of the context to use for the next data
+        additions.  The context persists until the next :meth:`set_context`.
+
+        .. versionadded:: 5.0
+
+        """
+        if self._debug.should('dataop'):
+            self._debug.write("Setting context: %r" % (context,))
+        self._current_context = context
+        # The id is resolved lazily by _set_context_id() on the next write.
+        self._current_context_id = None
+
+    def _set_context_id(self):
+        """Use the _current_context to set _current_context_id.
+
+        Looks up the current context's row id, inserting a new context row
+        if this context hasn't been seen before.
+        """
+        # A None context is stored as the empty string.
+        context = self._current_context or ""
+        context_id = self._context_id(context)
+        if context_id is not None:
+            self._current_context_id = context_id
+        else:
+            with self._connect() as con:
+                cur = con.execute("insert into context (context) values (?)", (context,))
+                self._current_context_id = cur.lastrowid
+
+    def base_filename(self):
+        """The base filename for storing data.
+
+        May differ from :meth:`data_filename` when a per-process suffix is
+        in use (see `_choose_filename`).
+
+        .. versionadded:: 5.0
+
+        """
+        return self._basename
+
+    def data_filename(self):
+        """Where is the data stored?
+
+        Returns the actual (possibly suffixed) file name being written.
+
+        .. versionadded:: 5.0
+
+        """
+        return self._filename
+
+    def add_lines(self, line_data):
+        """Add measured line data.
+
+        `line_data` is a dictionary mapping file names to dictionaries::
+
+            { filename: { lineno: None, ... }, ...}
+
+        Raises CoverageException (via `_choose_lines_or_arcs`) if the data
+        file already holds arc data.
+        """
+        if self._debug.should('dataop'):
+            self._debug.write("Adding lines: %d files, %d lines total" % (
+                len(line_data), sum(len(lines) for lines in line_data.values())
+            ))
+        self._start_using()
+        # Commit to line measurement even if line_data is empty.
+        self._choose_lines_or_arcs(lines=True)
+        if not line_data:
+            return
+        with self._connect() as con:
+            self._set_context_id()
+            for filename, linenos in iitems(line_data):
+                # Line numbers are stored as a packed bitmap ("numbits").
+                linemap = nums_to_numbits(linenos)
+                file_id = self._file_id(filename, add=True)
+                query = "select numbits from line_bits where file_id = ? and context_id = ?"
+                existing = list(con.execute(query, (file_id, self._current_context_id)))
+                if existing:
+                    # Merge with lines already recorded for this file/context.
+                    linemap = numbits_union(linemap, existing[0][0])
+
+                con.execute(
+                    "insert or replace into line_bits "
+                    " (file_id, context_id, numbits) values (?, ?, ?)",
+                    (file_id, self._current_context_id, linemap),
+                )
+
+    def add_arcs(self, arc_data):
+        """Add measured arc data.
+
+        `arc_data` is a dictionary mapping file names to dictionaries::
+
+            { filename: { (l1,l2): None, ... }, ...}
+
+        Raises CoverageException (via `_choose_lines_or_arcs`) if the data
+        file already holds line data.
+        """
+        if self._debug.should('dataop'):
+            self._debug.write("Adding arcs: %d files, %d arcs total" % (
+                len(arc_data), sum(len(arcs) for arcs in arc_data.values())
+            ))
+        self._start_using()
+        # Commit to arc measurement even if arc_data is empty.
+        self._choose_lines_or_arcs(arcs=True)
+        if not arc_data:
+            return
+        with self._connect() as con:
+            self._set_context_id()
+            for filename, arcs in iitems(arc_data):
+                file_id = self._file_id(filename, add=True)
+                data = [(file_id, self._current_context_id, fromno, tono) for fromno, tono in arcs]
+                # "or ignore": arcs are a set, duplicates are simply dropped.
+                con.executemany(
+                    "insert or ignore into arc "
+                    "(file_id, context_id, fromno, tono) values (?, ?, ?, ?)",
+                    data,
+                )
+
+    def _choose_lines_or_arcs(self, lines=False, arcs=False):
+        """Force the data file to choose between lines and arcs.
+
+        Exactly one of `lines` or `arcs` must be True.  A data file records
+        only one kind of measurement; mixing kinds raises CoverageException.
+        """
+        assert lines or arcs
+        assert not (lines and arcs)
+        if lines and self._has_arcs:
+            raise CoverageException("Can't add lines to existing arc data")
+        if arcs and self._has_lines:
+            raise CoverageException("Can't add arcs to existing line data")
+        if not self._has_arcs and not self._has_lines:
+            # First data of either kind: record the choice both in memory and
+            # persistently in the meta table (read back by _read_db).
+            self._has_lines = lines
+            self._has_arcs = arcs
+            with self._connect() as con:
+                con.execute(
+                    "insert into meta (key, value) values (?, ?)",
+                    ('has_arcs', str(int(arcs)))
+                )
+
+    def add_file_tracers(self, file_tracers):
+        """Add per-file plugin information.
+
+        `file_tracers` is { filename: plugin_name, ... }
+
+        Raises CoverageException if a file hasn't been measured, or if it
+        already has a different tracer recorded.
+        """
+        if self._debug.should('dataop'):
+            self._debug.write("Adding file tracers: %d files" % (len(file_tracers),))
+        if not file_tracers:
+            return
+        self._start_using()
+        with self._connect() as con:
+            for filename, plugin_name in iitems(file_tracers):
+                # Tracers can only be attached to files already in the data.
+                file_id = self._file_id(filename)
+                if file_id is None:
+                    raise CoverageException(
+                        "Can't add file tracer data for unmeasured file '%s'" % (filename,)
+                    )
+
+                existing_plugin = self.file_tracer(filename)
+                if existing_plugin:
+                    # A file may not switch tracers; re-adding the same one
+                    # is a no-op, a different one is an error.
+                    if existing_plugin != plugin_name:
+                        raise CoverageException(
+                            "Conflicting file tracer name for '%s': %r vs %r" % (
+                                filename, existing_plugin, plugin_name,
+                            )
+                        )
+                elif plugin_name:
+                    con.execute(
+                        "insert into tracer (file_id, tracer) values (?, ?)",
+                        (file_id, plugin_name)
+                    )
+
+    def touch_file(self, filename, plugin_name=""):
+        """Ensure that `filename` appears in the data, empty if needed.
+
+        `plugin_name` is the name of the plugin responsible for this file. It is used
+        to associate the right filereporter, etc.
+
+        Raises CoverageException if neither lines nor arcs have been chosen
+        yet (an empty CoverageData has no measurement kind to touch into).
+        """
+        if self._debug.should('dataop'):
+            self._debug.write("Touching %r" % (filename,))
+        self._start_using()
+        if not self._has_arcs and not self._has_lines:
+            raise CoverageException("Can't touch files in an empty CoverageData")
+
+        # Registers the file with no measured lines/arcs.
+        self._file_id(filename, add=True)
+        if plugin_name:
+            # Set the tracer for this file
+            self.add_file_tracers({filename: plugin_name})
+
+    def update(self, other_data, aliases=None):
+        """Update this data with data from several other :class:`CoverageData` instances.
+
+        If `aliases` is provided, it's a `PathAliases` object that is used to
+        re-map paths to match the local machine's.
+
+        The merge is done in two phases: first read everything from
+        `other_data` into plain Python structures, then write the combined
+        result into this database in a single transaction.
+        """
+        if self._debug.should('dataop'):
+            self._debug.write("Updating with data from %r" % (
+                getattr(other_data, '_filename', '???'),
+            ))
+        # The two data sets must record the same kind of measurement.
+        if self._has_lines and other_data._has_arcs:
+            raise CoverageException("Can't combine arc data with line data")
+        if self._has_arcs and other_data._has_lines:
+            raise CoverageException("Can't combine line data with arc data")
+
+        aliases = aliases or PathAliases()
+
+        # Force the database we're writing to to exist before we start nesting
+        # contexts.
+        self._start_using()
+
+        # Collector for all arcs, lines and tracers
+        other_data.read()
+        with other_data._connect() as conn:
+            # Get files data, remapping paths through `aliases` as we go.
+            cur = conn.execute('select path from file')
+            files = {path: aliases.map(path) for (path,) in cur}
+            cur.close()
+
+            # Get contexts data.
+            cur = conn.execute('select context from context')
+            contexts = [context for (context,) in cur]
+            cur.close()
+
+            # Get arc data.
+            cur = conn.execute(
+                'select file.path, context.context, arc.fromno, arc.tono '
+                'from arc '
+                'inner join file on file.id = arc.file_id '
+                'inner join context on context.id = arc.context_id'
+            )
+            arcs = [(files[path], context, fromno, tono) for (path, context, fromno, tono) in cur]
+            cur.close()
+
+            # Get line data.
+            cur = conn.execute(
+                'select file.path, context.context, line_bits.numbits '
+                'from line_bits '
+                'inner join file on file.id = line_bits.file_id '
+                'inner join context on context.id = line_bits.context_id'
+                )
+            lines = {
+                (files[path], context): numbits
+                for (path, context, numbits) in cur
+                }
+            cur.close()
+
+            # Get tracer data.
+            cur = conn.execute(
+                'select file.path, tracer '
+                'from tracer '
+                'inner join file on file.id = tracer.file_id'
+            )
+            tracers = {files[path]: tracer for (path, tracer) in cur}
+            cur.close()
+
+        with self._connect() as conn:
+            # IMMEDIATE isolation takes the write lock at transaction start,
+            # so the whole multi-statement merge below is atomic.
+            conn.con.isolation_level = 'IMMEDIATE'
+
+            # Get all tracers in the DB. Files not in the tracers are assumed
+            # to have an empty string tracer. Since Sqlite does not support
+            # full outer joins, we have to make two queries to fill the
+            # dictionary.
+            this_tracers = {path: '' for path, in conn.execute('select path from file')}
+            this_tracers.update({
+                aliases.map(path): tracer
+                for path, tracer in conn.execute(
+                    'select file.path, tracer from tracer '
+                    'inner join file on file.id = tracer.file_id'
+                )
+            })
+
+            # Create all file and context rows in the DB.
+            conn.executemany(
+                'insert or ignore into file (path) values (?)',
+                ((file,) for file in files.values())
+            )
+            file_ids = {
+                path: id
+                for id, path in conn.execute('select id, path from file')
+            }
+            conn.executemany(
+                'insert or ignore into context (context) values (?)',
+                ((context,) for context in contexts)
+            )
+            context_ids = {
+                context: id
+                for id, context in conn.execute('select id, context from context')
+            }
+
+            # Prepare tracers and fail, if a conflict is found.
+            # tracer_paths is used to ensure consistency over the tracer data
+            # and tracer_map tracks the tracers to be inserted.
+            tracer_map = {}
+            for path in files.values():
+                this_tracer = this_tracers.get(path)
+                other_tracer = tracers.get(path, '')
+                # If there is no tracer, there is always the None tracer.
+                if this_tracer is not None and this_tracer != other_tracer:
+                    raise CoverageException(
+                        "Conflicting file tracer name for '%s': %r vs %r" % (
+                            path, this_tracer, other_tracer
+                        )
+                    )
+                tracer_map[path] = other_tracer
+
+            # Prepare arc and line rows to be inserted by converting the file
+            # and context strings with integer ids. Then use the efficient
+            # `executemany()` to insert all rows at once.
+            arc_rows = (
+                (file_ids[file], context_ids[context], fromno, tono)
+                for file, context, fromno, tono in arcs
+            )
+
+            # Read THIS database's existing line data and union it into
+            # `lines`, so the wholesale rewrite below keeps both data sets.
+            cur = conn.execute(
+                'select file.path, context.context, line_bits.numbits '
+                'from line_bits '
+                'inner join file on file.id = line_bits.file_id '
+                'inner join context on context.id = line_bits.context_id'
+                )
+            for path, context, numbits in cur:
+                key = (aliases.map(path), context)
+                if key in lines:
+                    numbits = numbits_union(lines[key], numbits)
+                lines[key] = numbits
+            cur.close()
+
+            if arcs:
+                self._choose_lines_or_arcs(arcs=True)
+
+                # Write the combined data.
+                conn.executemany(
+                    'insert or ignore into arc '
+                    '(file_id, context_id, fromno, tono) values (?, ?, ?, ?)',
+                    arc_rows
+                )
+
+            if lines:
+                self._choose_lines_or_arcs(lines=True)
+                # Replace all line_bits rows with the merged union built above.
+                conn.execute("delete from line_bits")
+                conn.executemany(
+                    "insert into line_bits "
+                    "(file_id, context_id, numbits) values (?, ?, ?)",
+                    [
+                        (file_ids[file], context_ids[context], numbits)
+                        for (file, context), numbits in lines.items()
+                    ]
+                )
+            conn.executemany(
+                'insert or ignore into tracer (file_id, tracer) values (?, ?)',
+                ((file_ids[filename], tracer) for filename, tracer in tracer_map.items())
+            )
+
+        # Update all internal cache data.
+        self._reset()
+        self.read()
+
+    def erase(self, parallel=False):
+        """Erase the data in this object.
+
+        If `parallel` is true, then also deletes data files created from the
+        basename by parallel-mode.
+
+        """
+        # Drop in-memory state first; for in-memory-only data there is no
+        # file to remove.
+        self._reset()
+        if self._no_disk:
+            return
+        if self._debug.should('dataio'):
+            self._debug.write("Erasing data file {!r}".format(self._filename))
+        file_be_gone(self._filename)
+        if parallel:
+            # Parallel-mode files are "<basename>.<suffix>" siblings.
+            data_dir, local = os.path.split(self._filename)
+            localdot = local + '.*'
+            pattern = os.path.join(os.path.abspath(data_dir), localdot)
+            for filename in glob.glob(pattern):
+                if self._debug.should('dataio'):
+                    self._debug.write("Erasing parallel data file {!r}".format(filename))
+                file_be_gone(filename)
+
+    def read(self):
+        """Start using an existing data file."""
+        # Connecting is enough: _connect() opens/creates the db as a side
+        # effect; we only need to mark the data as used.
+        with self._connect():       # TODO: doesn't look right
+            self._have_used = True
+
+    def write(self):
+        """Ensure the data is written to the data file.
+
+        A no-op here: data is persisted by SQLite as it is added, so there
+        is nothing to flush.
+        """
+        pass
+
+    def _start_using(self):
+        """Call this before using the database at all.
+
+        Handles two lazily-deferred concerns: starting a fresh data file
+        after a fork, and erasing stale data on first use.
+        """
+        if self._pid != os.getpid():
+            # Looks like we forked! Have to start a new data file.
+            self._reset()
+            self._choose_filename()
+            self._pid = os.getpid()
+        if not self._have_used:
+            # First use in this process: clear any leftover data.
+            self.erase()
+        self._have_used = True
+
+    def has_arcs(self):
+        """Does the database have arcs (True) or lines (False)."""
+        # _has_arcs may be None before any data is added; normalize to bool.
+        return bool(self._has_arcs)
+
+    def measured_files(self):
+        """A set of all files that had been measured.
+
+        Served from the in-memory file map; no database query needed.
+        """
+        return set(self._file_map)
+
+    def measured_contexts(self):
+        """A set of all contexts that have been measured.
+
+        .. versionadded:: 5.0
+
+        """
+        self._start_using()
+        with self._connect() as con:
+            contexts = set(row[0] for row in con.execute("select distinct(context) from context"))
+        return contexts
+
+    def file_tracer(self, filename):
+        """Get the plugin name of the file tracer for a file.
+
+        Returns the name of the plugin that handles this file.  If the file was
+        measured, but didn't use a plugin, then "" is returned.  If the file
+        was not measured, then None is returned.
+
+        """
+        self._start_using()
+        with self._connect() as con:
+            file_id = self._file_id(filename)
+            if file_id is None:
+                # Unmeasured file: distinct from "measured with no tracer".
+                return None
+            row = con.execute("select tracer from tracer where file_id = ?", (file_id,)).fetchone()
+            if row is not None:
+                # A NULL/empty tracer value also normalizes to "".
+                return row[0] or ""
+            return ""   # File was measured, but no tracer associated.
+
+    def set_query_context(self, context):
+        """Set a context for subsequent querying.
+
+        The next :meth:`lines`, :meth:`arcs`, or :meth:`contexts_by_lineno`
+        calls will be limited to only one context.  `context` is a string which
+        must match a context exactly.  If it does not, no exception is raised,
+        but queries will return no data.
+
+        .. versionadded:: 5.0
+
+        """
+        self._start_using()
+        with self._connect() as con:
+            # Resolve the context name to ids now; an unknown name yields an
+            # empty id list, which makes later queries match nothing.
+            cur = con.execute("select id from context where context = ?", (context,))
+            self._query_context_ids = [row[0] for row in cur.fetchall()]
+
+    def set_query_contexts(self, contexts):
+        """Set a number of contexts for subsequent querying.
+
+        The next :meth:`lines`, :meth:`arcs`, or :meth:`contexts_by_lineno`
+        calls will be limited to the specified contexts.  `contexts` is a list
+        of Python regular expressions.  Contexts will be matched using
+        :func:`re.search <python:re.search>`.  Data will be included in query
+        results if they are part of any of the contexts matched.
+
+        .. versionadded:: 5.0
+
+        """
+        self._start_using()
+        if contexts:
+            # Build "context regexp ? or context regexp ? ..." with one
+            # placeholder per pattern; matching ids limit later queries.
+            with self._connect() as con:
+                context_clause = ' or '.join(['context regexp ?'] * len(contexts))
+                cur = con.execute("select id from context where " + context_clause, contexts)
+                self._query_context_ids = [row[0] for row in cur.fetchall()]
+        else:
+            # Empty/None means: no filtering, query all contexts.
+            self._query_context_ids = None
+
+    def lines(self, filename):
+        """Get the list of lines executed for a file.
+
+        If the file was not measured, returns None.  A file might be measured,
+        and have no lines executed, in which case an empty list is returned.
+
+        If the file was executed, returns a list of integers, the line numbers
+        executed in the file. The list is in no particular order.
+
+        """
+        self._start_using()
+        if self.has_arcs():
+            # Arc data subsumes line data: derive lines from arc endpoints.
+            arcs = self.arcs(filename)
+            if arcs is not None:
+                all_lines = itertools.chain.from_iterable(arcs)
+                # Negative numbers are entry/exit markers, not real lines.
+                return list(set(l for l in all_lines if l > 0))
+
+        with self._connect() as con:
+            file_id = self._file_id(filename)
+            if file_id is None:
+                return None
+            else:
+                query = "select numbits from line_bits where file_id = ?"
+                data = [file_id]
+                if self._query_context_ids is not None:
+                    # Honor set_query_context(s) filtering.
+                    ids_array = ', '.join('?' * len(self._query_context_ids))
+                    query += " and context_id in (" + ids_array + ")"
+                    data += self._query_context_ids
+                bitmaps = list(con.execute(query, data))
+                nums = set()
+                for row in bitmaps:
+                    # Unpack the numbits bitmap for each context's row.
+                    nums.update(numbits_to_nums(row[0]))
+                return list(nums)
+
+    def arcs(self, filename):
+        """Get the list of arcs executed for a file.
+
+        If the file was not measured, returns None.  A file might be measured,
+        and have no arcs executed, in which case an empty list is returned.
+
+        If the file was executed, returns a list of 2-tuples of integers. Each
+        pair is a starting line number and an ending line number for a
+        transition from one line to another. The list is in no particular
+        order.
+
+        Negative numbers have special meaning.  If the starting line number is
+        -N, it represents an entry to the code object that starts at line N.
+        If the ending ling number is -N, it's an exit from the code object that
+        starts at line N.
+
+        """
+        self._start_using()
+        with self._connect() as con:
+            file_id = self._file_id(filename)
+            if file_id is None:
+                return None
+            else:
+                query = "select distinct fromno, tono from arc where file_id = ?"
+                data = [file_id]
+                if self._query_context_ids is not None:
+                    # Honor set_query_context(s) filtering.
+                    ids_array = ', '.join('?' * len(self._query_context_ids))
+                    query += " and context_id in (" + ids_array + ")"
+                    data += self._query_context_ids
+                arcs = con.execute(query, data)
+                return list(arcs)
+
+    def contexts_by_lineno(self, filename):
+        """Get the contexts for each line in a file.
+
+        Returns:
+            A dict mapping line numbers to a list of context names.
+            Empty if the file was not measured.
+
+        .. versionadded:: 5.0
+
+        """
+        lineno_contexts_map = collections.defaultdict(list)
+        self._start_using()
+        with self._connect() as con:
+            file_id = self._file_id(filename)
+            if file_id is None:
+                return lineno_contexts_map
+            if self.has_arcs():
+                # Arc data: both endpoints of every arc count as touched lines.
+                query = (
+                    "select arc.fromno, arc.tono, context.context "
+                    "from arc, context "
+                    "where arc.file_id = ? and arc.context_id = context.id"
+                )
+                data = [file_id]
+                if self._query_context_ids is not None:
+                    # Honor set_query_context(s) filtering.
+                    ids_array = ', '.join('?' * len(self._query_context_ids))
+                    query += " and arc.context_id in (" + ids_array + ")"
+                    data += self._query_context_ids
+                for fromno, tono, context in con.execute(query, data):
+                    if context not in lineno_contexts_map[fromno]:
+                        lineno_contexts_map[fromno].append(context)
+                    if context not in lineno_contexts_map[tono]:
+                        lineno_contexts_map[tono].append(context)
+            else:
+                # Line data: unpack each context's numbits bitmap.
+                query = (
+                    "select l.numbits, c.context from line_bits l, context c "
+                    "where l.context_id = c.id "
+                    "and file_id = ?"
+                    )
+                data = [file_id]
+                if self._query_context_ids is not None:
+                    # Honor set_query_context(s) filtering.
+                    ids_array = ', '.join('?' * len(self._query_context_ids))
+                    query += " and l.context_id in (" + ids_array + ")"
+                    data += self._query_context_ids
+                for numbits, context in con.execute(query, data):
+                    for lineno in numbits_to_nums(numbits):
+                        lineno_contexts_map[lineno].append(context)
+        return lineno_contexts_map
+
+    @classmethod
+    def sys_info(cls):
+        """Our information for `Coverage.sys_info`.
+