Merged with coverage.py update.

author      Detlev Offenbach <detlev@die-offenbachs.de>
date        Sat, 10 Oct 2015 12:44:52 +0200
changeset   4491:0d8612e24fef
parent      4487:4ba7a8ab24f2
parent      4490:3f58261e7bb1
child       4492:1a958c27b767

files
DebugClients/Python/coverage/__init__.py
DebugClients/Python/coverage/__main__.py
DebugClients/Python/coverage/annotate.py
DebugClients/Python/coverage/backward.py
DebugClients/Python/coverage/bytecode.py
DebugClients/Python/coverage/cmdline.py
DebugClients/Python/coverage/codeunit.py
DebugClients/Python/coverage/collector.py
DebugClients/Python/coverage/config.py
DebugClients/Python/coverage/control.py
DebugClients/Python/coverage/data.py
DebugClients/Python/coverage/debug.py
DebugClients/Python/coverage/doc/PKG-INFO
DebugClients/Python/coverage/doc/README.txt
DebugClients/Python/coverage/execfile.py
DebugClients/Python/coverage/html.py
DebugClients/Python/coverage/htmlfiles/coverage_html.js
DebugClients/Python/coverage/htmlfiles/index.html
DebugClients/Python/coverage/htmlfiles/jquery.hotkeys.js
DebugClients/Python/coverage/htmlfiles/jquery.isonscreen.js
DebugClients/Python/coverage/htmlfiles/jquery.min.js
DebugClients/Python/coverage/htmlfiles/jquery.tablesorter.min.js
DebugClients/Python/coverage/htmlfiles/keybd_closed.png
DebugClients/Python/coverage/htmlfiles/keybd_open.png
DebugClients/Python/coverage/htmlfiles/pyfile.html
DebugClients/Python/coverage/htmlfiles/style.css
DebugClients/Python/coverage/misc.py
DebugClients/Python/coverage/parser.py
DebugClients/Python/coverage/phystokens.py
DebugClients/Python/coverage/report.py
DebugClients/Python/coverage/results.py
DebugClients/Python/coverage/summary.py
DebugClients/Python/coverage/templite.py
DebugClients/Python/coverage/version.py
DebugClients/Python/coverage/xmlreport.py
DebugClients/Python3/coverage/codeunit.py
DebugClients/Python3/coverage/doc/PKG-INFO
DebugClients/Python3/coverage/doc/README.txt
DebugClients/Python3/coverage/htmlfiles/coverage_html.js
DebugClients/Python3/coverage/htmlfiles/index.html
DebugClients/Python3/coverage/htmlfiles/jquery.hotkeys.js
DebugClients/Python3/coverage/htmlfiles/jquery.isonscreen.js
DebugClients/Python3/coverage/htmlfiles/jquery.min.js
DebugClients/Python3/coverage/htmlfiles/jquery.tablesorter.min.js
DebugClients/Python3/coverage/htmlfiles/keybd_closed.png
DebugClients/Python3/coverage/htmlfiles/keybd_open.png
DebugClients/Python3/coverage/htmlfiles/pyfile.html
DebugClients/Python3/coverage/htmlfiles/style.css
eric6.e4p
--- a/DataViews/PyCoverageDialog.py	Sat Oct 10 12:06:10 2015 +0200
+++ b/DataViews/PyCoverageDialog.py	Sat Oct 10 12:44:52 2015 +0200
@@ -10,7 +10,6 @@
 from __future__ import unicode_literals
 
 import os
-import sys
 
 from PyQt5.QtCore import pyqtSlot, Qt
 from PyQt5.QtWidgets import QDialog, QDialogButtonBox, QMenu, QHeaderView, \
@@ -23,12 +22,8 @@
 from .Ui_PyCoverageDialog import Ui_PyCoverageDialog
 
 import Utilities
-if sys.version_info[0] == 2:
-    from DebugClients.Python.coverage import coverage
-    from DebugClients.Python.coverage.misc import CoverageException
-else:
-    from DebugClients.Python3.coverage import coverage
-    from DebugClients.Python3.coverage.misc import CoverageException
+from coverage import coverage
+from coverage.misc import CoverageException
 
 
 class PyCoverageDialog(QDialog, Ui_PyCoverageDialog):
@@ -174,7 +169,6 @@
         files.sort()
         
         cover = coverage(data_file=self.cfn)
-        cover.use_cache(True)
         cover.load()
         
         # set the exclude pattern
@@ -322,7 +316,6 @@
         fn = itm.text(0)
         
         cover = coverage(data_file=self.cfn)
-        cover.use_cache(True)
         cover.exclude(self.excludeList[0])
         cover.load()
         cover.annotate([fn], None, True)
@@ -345,7 +338,6 @@
             files.append(itm.text(0))
         
         cover = coverage(data_file=self.cfn)
-        cover.use_cache(True)
         cover.exclude(self.excludeList[0])
         cover.load()
         
@@ -374,7 +366,6 @@
         stored in the .coverage file.
         """
         cover = coverage(data_file=self.cfn)
-        cover.use_cache(True)
         cover.load()
         cover.erase()
         
--- a/DebugClients/Python/DebugClientBase.py	Sat Oct 10 12:06:10 2015 +0200
+++ b/DebugClients/Python/DebugClientBase.py	Sat Oct 10 12:44:52 2015 +0200
@@ -610,7 +610,6 @@
                 self.cover = coverage(
                     auto_data=True,
                     data_file="%s.coverage" % os.path.splitext(sys.argv[0])[0])
-                self.cover.use_cache(True)
                 
                 if int(erase):
                     self.cover.erase()
@@ -893,7 +892,6 @@
                     self.cover = coverage(
                         auto_data=True,
                         data_file="%s.coverage" % os.path.splitext(covname)[0])
-                    self.cover.use_cache(True)
                     if int(erase):
                         self.cover.erase()
                 else:
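
Both hunks above drop the cover.use_cache(True) calls: use_cache() is gone in coverage.py 4.x, and load()/save() manage the data file on their own. A minimal sketch of the updated API as the dialog and debug client now use it (the data file and source name below are hypothetical, not from the changeset):

from coverage import coverage                 # 4.x keeps this alias for coverage.Coverage
from coverage.misc import CoverageException

try:
    cover = coverage(data_file="example.coverage")   # hypothetical data file
    cover.exclude('# pragma: no cover')              # exclude pattern is a regex
    cover.load()                                     # no use_cache() call needed any more
    cover.annotate(["example.py"], None, True)       # morfs, directory, ignore_errors
except CoverageException as err:
    print(err)
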
--- a/DebugClients/Python/coverage/__init__.py	Sat Oct 10 12:06:10 2015 +0200
+++ b/DebugClients/Python/coverage/__init__.py	Sat Oct 10 12:44:52 2015 +0200
@@ -1,3 +1,6 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
 """Code coverage measurement for Python.
 
 Ned Batchelder
@@ -5,80 +8,22 @@
 
 """
 
-from .version import __version__, __url__
-
-from .control import coverage, process_startup
-from .data import CoverageData
-from .cmdline import main, CoverageScript
-from .misc import CoverageException
-
-# Module-level functions.  The original API to this module was based on
-# functions defined directly in the module, with a singleton of the coverage()
-# class.  That design hampered programmability, so the current api uses
-# explicitly-created coverage objects.  But for backward compatibility, here we
-# define the top-level functions to create the singleton when they are first
-# called.
-
-# Singleton object for use with module-level functions.  The singleton is
-# created as needed when one of the module-level functions is called.
-_the_coverage = None
-
-def _singleton_method(name):
-    """Return a function to the `name` method on a singleton `coverage` object.
-
-    The singleton object is created the first time one of these functions is
-    called.
-
-    """
-    # Disable pylint msg W0612, because a bunch of variables look unused, but
-    # they're accessed via locals().
-    # pylint: disable=W0612
+from coverage.version import __version__, __url__, version_info
 
-    def wrapper(*args, **kwargs):
-        """Singleton wrapper around a coverage method."""
-        global _the_coverage
-        if not _the_coverage:
-            _the_coverage = coverage(auto_data=True)
-        return getattr(_the_coverage, name)(*args, **kwargs)
-
-    import inspect
-    meth = getattr(coverage, name)
-    args, varargs, kw, defaults = inspect.getargspec(meth)
-    argspec = inspect.formatargspec(args[1:], varargs, kw, defaults)
-    docstring = meth.__doc__
-    wrapper.__doc__ = ("""\
-        A first-use-singleton wrapper around coverage.%(name)s.
-
-        This wrapper is provided for backward compatibility with legacy code.
-        New code should use coverage.%(name)s directly.
+from coverage.control import Coverage, process_startup
+from coverage.data import CoverageData
+from coverage.misc import CoverageException
+from coverage.plugin import CoveragePlugin, FileTracer, FileReporter
 
-        %(name)s%(argspec)s:
-
-        %(docstring)s
-        """ % locals()
-        )
-
-    return wrapper
-
-
-# Define the module-level functions.
-use_cache = _singleton_method('use_cache')
-start =     _singleton_method('start')
-stop =      _singleton_method('stop')
-erase =     _singleton_method('erase')
-exclude =   _singleton_method('exclude')
-analysis =  _singleton_method('analysis')
-analysis2 = _singleton_method('analysis2')
-report =    _singleton_method('report')
-annotate =  _singleton_method('annotate')
-
+# Backward compatibility.
+coverage = Coverage
 
 # On Windows, we encode and decode deep enough that something goes wrong and
 # the encodings.utf_8 module is loaded and then unloaded, I don't know why.
 # Adding a reference here prevents it from being unloaded.  Yuk.
 import encodings.utf_8
 
-# Because of the "from .control import fooey" lines at the top of the
+# Because of the "from coverage.control import fooey" lines at the top of the
 # file, there's an entry for coverage.coverage in sys.modules, mapped to None.
 # This makes some inspection tools (like pydoc) unable to find the class
 # coverage.coverage.  So remove that entry.
@@ -88,36 +33,5 @@
 except KeyError:
     pass
 
-
-# COPYRIGHT AND LICENSE
-#
-# Copyright 2001 Gareth Rees.  All rights reserved.
-# Copyright 2004-2013 Ned Batchelder.  All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# 1. Redistributions of source code must retain the above copyright
-#    notice, this list of conditions and the following disclaimer.
-#
-# 2. Redistributions in binary form must reproduce the above copyright
-#    notice, this list of conditions and the following disclaimer in the
-#    documentation and/or other materials provided with the
-#    distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# HOLDERS AND CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
-# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
-# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
-# DAMAGE.
-
 #
 # eflag: FileType = Python2
--- a/DebugClients/Python/coverage/__main__.py	Sat Oct 10 12:06:10 2015 +0200
+++ b/DebugClients/Python/coverage/__main__.py	Sat Oct 10 12:44:52 2015 +0200
@@ -1,6 +1,10 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
 """Coverage.py's main entry point."""
+
 import sys
-from .cmdline import main
+from coverage.cmdline import main
 sys.exit(main())
 
 #
--- a/DebugClients/Python/coverage/annotate.py	Sat Oct 10 12:06:10 2015 +0200
+++ b/DebugClients/Python/coverage/annotate.py	Sat Oct 10 12:44:52 2015 +0200
@@ -1,9 +1,14 @@
-"""Source file annotation for Coverage."""
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
+"""Source file annotation for coverage.py."""
 
-import os, re
+import io
+import os
+import re
 
-from .backward import sorted                    # pylint: disable=W0622
-from .report import Reporter
+from coverage.files import flat_rootname
+from coverage.report import Reporter
 
 class AnnotateReporter(Reporter):
     """Generate annotated source files showing line coverage.
@@ -42,64 +47,56 @@
         """
         self.report_files(self.annotate_file, morfs, directory)
 
-    def annotate_file(self, cu, analysis):
+    def annotate_file(self, fr, analysis):
         """Annotate a single file.
 
-        `cu` is the CodeUnit for the file to annotate.
+        `fr` is the FileReporter for the file to annotate.
 
         """
-        if not cu.relative:
-            return
-
-        filename = cu.filename
-        source = cu.source_file()
-        if self.directory:
-            dest_file = os.path.join(self.directory, cu.flat_rootname())
-            dest_file += ".py,cover"
-        else:
-            dest_file = filename + ",cover"
-        dest = open(dest_file, 'w')
-
         statements = sorted(analysis.statements)
         missing = sorted(analysis.missing)
         excluded = sorted(analysis.excluded)
 
-        lineno = 0
-        i = 0
-        j = 0
-        covered = True
-        while True:
-            line = source.readline()
-            if line == '':
-                break
-            lineno += 1
-            while i < len(statements) and statements[i] < lineno:
-                i += 1
-            while j < len(missing) and missing[j] < lineno:
-                j += 1
-            if i < len(statements) and statements[i] == lineno:
-                covered = j >= len(missing) or missing[j] > lineno
-            if self.blank_re.match(line):
-                dest.write('  ')
-            elif self.else_re.match(line):
-                # Special logic for lines containing only 'else:'.
-                if i >= len(statements) and j >= len(missing):
-                    dest.write('! ')
-                elif i >= len(statements) or j >= len(missing):
-                    dest.write('> ')
-                elif statements[i] == missing[j]:
-                    dest.write('! ')
+        if self.directory:
+            dest_file = os.path.join(self.directory, flat_rootname(fr.relative_filename()))
+            if dest_file.endswith("_py"):
+                dest_file = dest_file[:-3] + ".py"
+            dest_file += ",cover"
+        else:
+            dest_file = fr.filename + ",cover"
+
+        with io.open(dest_file, 'w', encoding='utf8') as dest:
+            i = 0
+            j = 0
+            covered = True
+            source = fr.source()
+            for lineno, line in enumerate(source.splitlines(True), start=1):
+                while i < len(statements) and statements[i] < lineno:
+                    i += 1
+                while j < len(missing) and missing[j] < lineno:
+                    j += 1
+                if i < len(statements) and statements[i] == lineno:
+                    covered = j >= len(missing) or missing[j] > lineno
+                if self.blank_re.match(line):
+                    dest.write(u'  ')
+                elif self.else_re.match(line):
+                    # Special logic for lines containing only 'else:'.
+                    if i >= len(statements) and j >= len(missing):
+                        dest.write(u'! ')
+                    elif i >= len(statements) or j >= len(missing):
+                        dest.write(u'> ')
+                    elif statements[i] == missing[j]:
+                        dest.write(u'! ')
+                    else:
+                        dest.write(u'> ')
+                elif lineno in excluded:
+                    dest.write(u'- ')
+                elif covered:
+                    dest.write(u'> ')
                 else:
-                    dest.write('> ')
-            elif lineno in excluded:
-                dest.write('- ')
-            elif covered:
-                dest.write('> ')
-            else:
-                dest.write('! ')
-            dest.write(line)
-        source.close()
-        dest.close()
+                    dest.write(u'! ')
+
+                dest.write(line)
 
 #
 # eflag: FileType = Python2
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/DebugClients/Python/coverage/backunittest.py	Sat Oct 10 12:44:52 2015 +0200
@@ -0,0 +1,42 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
+"""Implementations of unittest features from the future."""
+
+# Use unittest2 if it's available, otherwise unittest.  This gives us
+# back-ported features for 2.6.
+try:
+    import unittest2 as unittest
+except ImportError:
+    import unittest
+
+
+def unittest_has(method):
+    """Does `unittest.TestCase` have `method` defined?"""
+    return hasattr(unittest.TestCase, method)
+
+
+class TestCase(unittest.TestCase):
+    """Just like unittest.TestCase, but with assert methods added.
+
+    Designed to be compatible with 3.1 unittest.  Methods are only defined if
+    `unittest` doesn't have them.
+
+    """
+    # pylint: disable=missing-docstring
+
+    # Many Pythons have this method defined.  But PyPy3 has a bug with it
+    # somehow (https://bitbucket.org/pypy/pypy/issues/2092), so always use our
+    # own implementation that works everywhere, at least for the ways we're
+    # calling it.
+    def assertCountEqual(self, s1, s2):
+        """Assert these have the same elements, regardless of order."""
+        self.assertEqual(sorted(s1), sorted(s2))
+
+    if not unittest_has('assertRaisesRegex'):
+        def assertRaisesRegex(self, *args, **kwargs):
+            return self.assertRaisesRegexp(*args, **kwargs)
+
+    if not unittest_has('assertRegex'):
+        def assertRegex(self, *args, **kwargs):
+            return self.assertRegexpMatches(*args, **kwargs)
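
A hypothetical usage sketch of the compatibility TestCase added above (the test class and data are invented; it assumes the coverage package is importable):

import unittest

from coverage.backunittest import TestCase


class CompatShimTest(TestCase):
    """Exercises the shimmed assert methods."""

    def test_count_equal(self):
        # Same elements, different order.
        self.assertCountEqual([3, 1, 2], [1, 2, 3])

    def test_regex(self):
        # Works on Python 2 and 3 thanks to the assertRegex shim.
        self.assertRegex("coverage 4.0", r"\d+\.\d+")


if __name__ == "__main__":
    unittest.main()
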
--- a/DebugClients/Python/coverage/backward.py	Sat Oct 10 12:06:10 2015 +0200
+++ b/DebugClients/Python/coverage/backward.py	Sat Oct 10 12:44:52 2015 +0200
@@ -1,60 +1,29 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
 """Add things to old Pythons so I can pretend they are newer."""
 
-# This file does lots of tricky stuff, so disable a bunch of lintisms.
-# pylint: disable=F0401,W0611,W0622
-# F0401: Unable to import blah
-# W0611: Unused import blah
-# W0622: Redefining built-in blah
-
-import os, re, sys
-
-# Python 2.3 doesn't have `set`
-try:
-    set = set       # new in 2.4
-except NameError:
-    from sets import Set as set
-
-# Python 2.3 doesn't have `sorted`.
-try:
-    sorted = sorted
-except NameError:
-    def sorted(iterable):
-        """A 2.3-compatible implementation of `sorted`."""
-        lst = list(iterable)
-        lst.sort()
-        return lst
+# This file does lots of tricky stuff, so disable a bunch of pylint warnings.
+# pylint: disable=redefined-builtin
+# pylint: disable=unused-import
+# pylint: disable=no-name-in-module
 
-# Python 2.3 doesn't have `reversed`.
-try:
-    reversed = reversed
-except NameError:
-    def reversed(iterable):
-        """A 2.3-compatible implementation of `reversed`."""
-        lst = list(iterable)
-        return lst[::-1]
+import sys
 
-# rpartition is new in 2.5
-try:
-    "".rpartition
-except AttributeError:
-    def rpartition(s, sep):
-        """Implement s.rpartition(sep) for old Pythons."""
-        i = s.rfind(sep)
-        if i == -1:
-            return ('', '', s)
-        else:
-            return (s[:i], sep, s[i+len(sep):])
-else:
-    def rpartition(s, sep):
-        """A common interface for new Pythons."""
-        return s.rpartition(sep)
+from coverage import env
 
-# Pythons 2 and 3 differ on where to get StringIO
+
+# Pythons 2 and 3 differ on where to get StringIO.
 try:
     from cStringIO import StringIO
-    BytesIO = StringIO
 except ImportError:
-    from io import StringIO, BytesIO
+    from io import StringIO
+
+# In py3, ConfigParser was renamed to the more-standard configparser
+try:
+    import configparser
+except ImportError:
+    import ConfigParser as configparser
 
 # What's a string called?
 try:
@@ -62,6 +31,12 @@
 except NameError:
     string_class = str
 
+# What's a Unicode string called?
+try:
+    unicode_class = unicode
+except NameError:
+    unicode_class = str
+
 # Where do pickles come from?
 try:
     import cPickle as pickle
@@ -86,60 +61,25 @@
         """Produce the items from dict `d`."""
         return d.iteritems()
 
-# Exec is a statement in Py2, a function in Py3
-if sys.version_info >= (3, 0):
-    def exec_code_object(code, global_map):
-        """A wrapper around exec()."""
-        exec(code, global_map)
+# Getting the `next` function from an iterator is different in 2 and 3.
+try:
+    iter([]).next
+except AttributeError:
+    def iternext(seq):
+        """Get the `next` function for iterating over `seq`."""
+        return iter(seq).__next__
 else:
-    # OK, this is pretty gross.  In Py2, exec was a statement, but that will
-    # be a syntax error if we try to put it in a Py3 file, even if it is never
-    # executed.  So hide it inside an evaluated string literal instead.
-    eval(
-        compile(
-            "def exec_code_object(code, global_map):\n"
-            "    exec code in global_map\n",
-            "<exec_function>", "exec"
-            )
-        )
-
-# Reading Python source and interpreting the coding comment is a big deal.
-if sys.version_info >= (3, 0):
-    # Python 3.2 provides `tokenize.open`, the best way to open source files.
-    import tokenize
-    try:
-        open_source = tokenize.open     # pylint: disable=E1101
-    except AttributeError:
-        from io import TextIOWrapper
-        detect_encoding = tokenize.detect_encoding  # pylint: disable=E1101
-        # Copied from the 3.2 stdlib:
-        def open_source(fname):
-            """Open a file in read only mode using the encoding detected by
-            detect_encoding().
-            """
-            buffer = open(fname, 'rb')
-            encoding, _ = detect_encoding(buffer.readline)
-            buffer.seek(0)
-            text = TextIOWrapper(buffer, encoding, line_buffering=True)
-            text.mode = 'r'
-            return text
-else:
-    def open_source(fname):
-        """Open a source file the best way."""
-        return open(fname, "rU")
-
+    def iternext(seq):
+        """Get the `next` function for iterating over `seq`."""
+        return iter(seq).next
 
 # Python 3.x is picky about bytes and strings, so provide methods to
 # get them right, and make them no-ops in 2.x
-if sys.version_info >= (3, 0):
+if env.PY3:
     def to_bytes(s):
         """Convert string `s` to bytes."""
         return s.encode('utf8')
 
-    def to_string(b):
-        """Convert bytes `b` to a string."""
-        return b.decode('utf8')
-
     def binary_bytes(byte_values):
         """Produce a byte string with the ints from `byte_values`."""
         return bytes(byte_values)
@@ -150,7 +90,7 @@
 
     def bytes_to_ints(bytes_value):
         """Turn a bytes object into a sequence of ints."""
-        # In Py3, iterating bytes gives ints.
+        # In Python 3, iterating bytes gives ints.
         return bytes_value
 
 else:
@@ -158,13 +98,9 @@
         """Convert string `s` to bytes (no-op in 2.x)."""
         return s
 
-    def to_string(b):
-        """Convert bytes `b` to a string (no-op in 2.x)."""
-        return b
-
     def binary_bytes(byte_values):
         """Produce a byte string with the ints from `byte_values`."""
-        return "".join([chr(b) for b in byte_values])
+        return "".join(chr(b) for b in byte_values)
 
     def byte_to_int(byte_value):
         """Turn an element of a bytes object into an int."""
@@ -175,13 +111,62 @@
         for byte in bytes_value:
             yield ord(byte)
 
-# Md5 is available in different places.
+
+try:
+    # In Python 2.x, the builtins were in __builtin__
+    BUILTINS = sys.modules['__builtin__']
+except KeyError:
+    # In Python 3.x, they're in builtins
+    BUILTINS = sys.modules['builtins']
+
+
+# imp was deprecated in Python 3.3
+try:
+    import importlib
+    import importlib.util
+    imp = None
+except ImportError:
+    importlib = None
+
+# We only want to use importlib if it has everything we need.
+try:
+    importlib_util_find_spec = importlib.util.find_spec
+except Exception:
+    import imp
+    importlib_util_find_spec = None
+
+# What is the .pyc magic number for this version of Python?
 try:
-    import hashlib
-    md5 = hashlib.md5
-except ImportError:
-    import md5
-    md5 = md5.new
+    PYC_MAGIC_NUMBER = importlib.util.MAGIC_NUMBER
+except AttributeError:
+    PYC_MAGIC_NUMBER = imp.get_magic()
+
+
+def import_local_file(modname):
+    """Import a local file as a module.
+
+    Opens a file in the current directory named `modname`.py, imports it
+    as `modname`, and returns the module object.
+
+    """
+    try:
+        from importlib.machinery import SourceFileLoader
+    except ImportError:
+        SourceFileLoader = None
+
+    modfile = modname + '.py'
+    if SourceFileLoader:
+        mod = SourceFileLoader(modname, modfile).load_module()
+    else:
+        for suff in imp.get_suffixes():                 # pragma: part covered
+            if suff[0] == '.py':
+                break
+
+        with open(modfile, 'r') as f:
+            # pylint: disable=undefined-loop-variable
+            mod = imp.load_module(modname, f, modfile, suff)
+
+    return mod
 
 #
 # eflag: FileType = Python2
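
The rewritten backward.py also gains the import_local_file() helper shown above. A hypothetical usage sketch (the module name is invented; it assumes a local_settings.py file exists in the current directory):

from coverage.backward import import_local_file

settings = import_local_file("local_settings")   # imports ./local_settings.py
print(settings.__name__)                         # -> "local_settings"
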
--- a/DebugClients/Python/coverage/bytecode.py	Sat Oct 10 12:06:10 2015 +0200
+++ b/DebugClients/Python/coverage/bytecode.py	Sat Oct 10 12:44:52 2015 +0200
@@ -1,8 +1,13 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
 """Bytecode manipulation for coverage.py"""
 
-import opcode, types
+import opcode
+import types
 
-from .backward import byte_to_int
+from coverage.backward import byte_to_int
+
 
 class ByteCode(object):
     """A single bytecode."""
@@ -26,10 +31,12 @@
 class ByteCodes(object):
     """Iterator over byte codes in `code`.
 
+    This handles the logic of EXTENDED_ARG byte codes internally.  Those byte
+    codes are not returned by this iterator.
+
     Returns `ByteCode` objects.
 
     """
-    # pylint: disable=R0924
     def __init__(self, code):
         self.code = code
 
@@ -38,6 +45,7 @@
 
     def __iter__(self):
         offset = 0
+        ext_arg = 0
         while offset < len(self.code):
             bc = ByteCode()
             bc.op = self[offset]
@@ -45,7 +53,7 @@
 
             next_offset = offset+1
             if bc.op >= opcode.HAVE_ARGUMENT:
-                bc.arg = self[offset+1] + 256*self[offset+2]
+                bc.arg = ext_arg + self[offset+1] + 256*self[offset+2]
                 next_offset += 2
 
                 label = -1
@@ -56,7 +64,11 @@
                 bc.jump_to = label
 
             bc.next_offset = offset = next_offset
-            yield bc
+            if bc.op == opcode.EXTENDED_ARG:
+                ext_arg = bc.arg * 256*256
+            else:
+                ext_arg = 0
+                yield bc
 
 
 class CodeObjects(object):
--- a/DebugClients/Python/coverage/cmdline.py	Sat Oct 10 12:06:10 2015 +0200
+++ b/DebugClients/Python/coverage/cmdline.py	Sat Oct 10 12:44:52 2015 +0200
@@ -1,18 +1,25 @@
-"""Command-line support for Coverage."""
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
 
-import optparse, os, sys, time, traceback
+"""Command-line support for coverage.py."""
 
-from .backward import sorted                # pylint: disable=W0622
-from .execfile import run_python_file, run_python_module
-from .misc import CoverageException, ExceptionDuringRun, NoSource
-from .debug import info_formatter
+import glob
+import optparse
+import os.path
+import sys
+import traceback
+
+from coverage import env
+from coverage.execfile import run_python_file, run_python_module
+from coverage.misc import CoverageException, ExceptionDuringRun, NoSource
+from coverage.debug import info_formatter, info_header
 
 
 class Opts(object):
     """A namespace class for individual options we'll build parsers from."""
 
     append = optparse.make_option(
-        '-a', '--append', action='store_false', dest="erase_first",
+        '-a', '--append', action='store_true',
         help="Append coverage data to .coverage, otherwise it is started "
                 "clean with each run."
         )
@@ -20,6 +27,15 @@
         '', '--branch', action='store_true',
         help="Measure branch coverage in addition to statement coverage."
         )
+    CONCURRENCY_CHOICES = [
+        "thread", "gevent", "greenlet", "eventlet", "multiprocessing",
+    ]
+    concurrency = optparse.make_option(
+        '', '--concurrency', action='store', metavar="LIB",
+        choices=CONCURRENCY_CHOICES,
+        help="Properly measure code using a concurrency library. "
+            "Valid values are: %s." % ", ".join(CONCURRENCY_CHOICES)
+        )
     debug = optparse.make_option(
         '', '--debug', action='store', metavar="OPTS",
         help="Debug options, separated by commas"
@@ -43,8 +59,8 @@
     include = optparse.make_option(
         '', '--include', action='store',
         metavar="PAT1,PAT2,...",
-        help="Include files only when their filename path matches one of "
-                "these patterns.  Usually needs quoting on the command line."
+        help="Include only files whose paths match one of these patterns. "
+                "Accepts shell-style wildcards, which must be quoted."
         )
     pylib = optparse.make_option(
         '-L', '--pylib', action='store_true',
@@ -56,17 +72,15 @@
         help="Show line numbers of statements in each module that weren't "
                 "executed."
         )
-    old_omit = optparse.make_option(
-        '-o', '--omit', action='store',
-        metavar="PAT1,PAT2,...",
-        help="Omit files when their filename matches one of these patterns. "
-                "Usually needs quoting on the command line."
+    skip_covered = optparse.make_option(
+        '--skip-covered', action='store_true',
+        help="Skip files with 100% coverage."
         )
     omit = optparse.make_option(
         '', '--omit', action='store',
         metavar="PAT1,PAT2,...",
-        help="Omit files when their filename matches one of these patterns. "
-                "Usually needs quoting on the command line."
+        help="Omit files whose paths match one of these patterns. "
+                "Accepts shell-style wildcards, which must be quoted."
         )
     output_xml = optparse.make_option(
         '-o', '', action='store', dest="outfile",
@@ -108,7 +122,7 @@
 
 
 class CoverageOptionParser(optparse.OptionParser, object):
-    """Base OptionParser for coverage.
+    """Base OptionParser for coverage.py.
 
     Problems don't exit the program.
     Defaults are initialized for all options.
@@ -120,24 +134,26 @@
             add_help_option=False, *args, **kwargs
             )
         self.set_defaults(
-            actions=[],
+            action=None,
+            append=None,
             branch=None,
+            concurrency=None,
             debug=None,
             directory=None,
             fail_under=None,
             help=None,
             ignore_errors=None,
             include=None,
+            module=None,
             omit=None,
             parallel_mode=None,
-            module=None,
             pylib=None,
             rcfile=True,
             show_missing=None,
+            skip_covered=None,
             source=None,
             timid=None,
             title=None,
-            erase_first=None,
             version=None,
             )
 
@@ -152,7 +168,7 @@
         """Used to stop the optparse error handler ending the process."""
         pass
 
-    def parse_args(self, args=None, options=None):
+    def parse_args_ok(self, args=None, options=None):
         """Call optparse.parse_args, but return a triple:
 
         (ok, options, args)
@@ -171,70 +187,44 @@
         raise self.OptionParserError
 
 
-class ClassicOptionParser(CoverageOptionParser):
-    """Command-line parser for coverage.py classic arguments."""
+class GlobalOptionParser(CoverageOptionParser):
+    """Command-line parser for coverage.py global option arguments."""
 
     def __init__(self):
-        super(ClassicOptionParser, self).__init__()
-
-        self.add_action('-a', '--annotate', 'annotate')
-        self.add_action('-b', '--html', 'html')
-        self.add_action('-c', '--combine', 'combine')
-        self.add_action('-e', '--erase', 'erase')
-        self.add_action('-r', '--report', 'report')
-        self.add_action('-x', '--execute', 'execute')
+        super(GlobalOptionParser, self).__init__()
 
         self.add_options([
-            Opts.directory,
             Opts.help,
-            Opts.ignore_errors,
-            Opts.pylib,
-            Opts.show_missing,
-            Opts.old_omit,
-            Opts.parallel_mode,
-            Opts.timid,
             Opts.version,
         ])
 
-    def add_action(self, dash, dashdash, action_code):
-        """Add a specialized option that is the action to execute."""
-        option = self.add_option(dash, dashdash, action='callback',
-            callback=self._append_action
-            )
-        option.action_code = action_code
-
-    def _append_action(self, option, opt_unused, value_unused, parser):
-        """Callback for an option that adds to the `actions` list."""
-        parser.values.actions.append(option.action_code)
-
 
 class CmdOptionParser(CoverageOptionParser):
     """Parse one of the new-style commands for coverage.py."""
 
     def __init__(self, action, options=None, defaults=None, usage=None,
-                cmd=None, description=None
+                description=None
                 ):
-        """Create an OptionParser for a coverage command.
+        """Create an OptionParser for a coverage.py command.
 
-        `action` is the slug to put into `options.actions`.
+        `action` is the slug to put into `options.action`.
         `options` is a list of Option's for the command.
         `defaults` is a dict of default value for options.
         `usage` is the usage string to display in help.
-        `cmd` is the command name, if different than `action`.
         `description` is the description of the command, for the help text.
 
         """
         if usage:
             usage = "%prog " + usage
         super(CmdOptionParser, self).__init__(
-            prog="coverage %s" % (cmd or action),
+            prog="coverage %s" % action,
             usage=usage,
             description=description,
         )
-        self.set_defaults(actions=[action], **(defaults or {}))
+        self.set_defaults(action=action, **(defaults or {}))
         if options:
             self.add_options(options)
-        self.cmd = cmd or action
+        self.cmd = action
 
     def __eq__(self, other):
         # A convenience equality, so that I can put strings in unit test
@@ -242,8 +232,9 @@
         return (other == "<CmdOptionParser:%s>" % self.cmd)
 
 GLOBAL_ARGS = [
+    Opts.debug,
+    Opts.help,
     Opts.rcfile,
-    Opts.help,
     ]
 
 CMDS = {
@@ -251,8 +242,8 @@
         [
             Opts.directory,
             Opts.ignore_errors,
+            Opts.include,
             Opts.omit,
-            Opts.include,
             ] + GLOBAL_ARGS,
         usage = "[options] [modules]",
         description = "Make annotated copies of the given files, marking "
@@ -261,10 +252,13 @@
         ),
 
     'combine': CmdOptionParser("combine", GLOBAL_ARGS,
-        usage = " ",
+        usage = "<path1> <path2> ... <pathN>",
         description = "Combine data from multiple coverage files collected "
             "with 'run -p'.  The combined results are written to a single "
-            "file representing the union of the data."
+            "file representing the union of the data. The positional "
+            "arguments are data files or directories containing data files. "
+            "If no paths are provided, data files in the default data file's "
+            "directory are combined."
         ),
 
     'debug': CmdOptionParser("debug", GLOBAL_ARGS,
@@ -290,8 +284,8 @@
             Opts.directory,
             Opts.fail_under,
             Opts.ignore_errors,
+            Opts.include,
             Opts.omit,
-            Opts.include,
             Opts.title,
             ] + GLOBAL_ARGS,
         usage = "[options] [modules]",
@@ -304,29 +298,28 @@
         [
             Opts.fail_under,
             Opts.ignore_errors,
+            Opts.include,
             Opts.omit,
-            Opts.include,
             Opts.show_missing,
+            Opts.skip_covered,
             ] + GLOBAL_ARGS,
         usage = "[options] [modules]",
         description = "Report coverage statistics on modules."
         ),
 
-    'run': CmdOptionParser("execute",
+    'run': CmdOptionParser("run",
         [
             Opts.append,
             Opts.branch,
-            Opts.debug,
+            Opts.concurrency,
+            Opts.include,
+            Opts.module,
+            Opts.omit,
             Opts.pylib,
             Opts.parallel_mode,
-            Opts.module,
+            Opts.source,
             Opts.timid,
-            Opts.source,
-            Opts.omit,
-            Opts.include,
             ] + GLOBAL_ARGS,
-        defaults = {'erase_first': True},
-        cmd = "run",
         usage = "[options] <pyfile> [program options]",
         description = "Run a Python program, measuring code execution."
         ),
@@ -335,11 +328,10 @@
         [
             Opts.fail_under,
             Opts.ignore_errors,
+            Opts.include,
             Opts.omit,
-            Opts.include,
             Opts.output_xml,
             ] + GLOBAL_ARGS,
-        cmd = "xml",
         usage = "[options] [modules]",
         description = "Generate an XML report of coverage results."
         ),
@@ -350,27 +342,28 @@
 
 
 class CoverageScript(object):
-    """The command-line interface to Coverage."""
+    """The command-line interface to coverage.py."""
 
     def __init__(self, _covpkg=None, _run_python_file=None,
-                 _run_python_module=None, _help_fn=None):
+                 _run_python_module=None, _help_fn=None, _path_exists=None):
         # _covpkg is for dependency injection, so we can test this code.
         if _covpkg:
             self.covpkg = _covpkg
         else:
-            from . import coverage
+            import coverage
             self.covpkg = coverage
 
         # For dependency injection:
         self.run_python_file = _run_python_file or run_python_file
         self.run_python_module = _run_python_module or run_python_module
         self.help_fn = _help_fn or self.help
-        self.classic = False
+        self.path_exists = _path_exists or os.path.exists
+        self.global_option = False
 
         self.coverage = None
 
     def command_line(self, argv):
-        """The bulk of the command line interface to Coverage.
+        """The bulk of the command line interface to coverage.py.
 
         `argv` is the argument list to process.
 
@@ -382,11 +375,11 @@
             self.help_fn(topic='minimum_help')
             return OK
 
-        # The command syntax we parse depends on the first argument.  Classic
-        # syntax always starts with an option.
-        self.classic = argv[0].startswith('-')
-        if self.classic:
-            parser = ClassicOptionParser()
+        # The command syntax we parse depends on the first argument.  Global
+        # switch syntax always starts with an option.
+        self.global_option = argv[0].startswith('-')
+        if self.global_option:
+            parser = GlobalOptionParser()
         else:
             parser = CMDS.get(argv[0])
             if not parser:
@@ -395,7 +388,7 @@
             argv = argv[1:]
 
         parser.help_fn = self.help_fn
-        ok, options, args = parser.parse_args(argv)
+        ok, options, args = parser.parse_args_ok(argv)
         if not ok:
             return ERR
 
@@ -407,6 +400,10 @@
         if not self.args_ok(options, args):
             return ERR
 
+        # We need to be able to import from the current directory, because
+        # plugins may try to, for example, to read Django settings.
+        sys.path[0] = ''
+
         # Listify the list options.
         source = unshell_list(options.source)
         omit = unshell_list(options.omit)
@@ -424,52 +421,74 @@
             omit = omit,
             include = include,
             debug = debug,
+            concurrency = options.concurrency,
             )
 
-        if 'debug' in options.actions:
+        if options.action == "debug":
             return self.do_debug(args)
 
-        if 'erase' in options.actions or options.erase_first:
+        elif options.action == "erase":
             self.coverage.erase()
-        else:
-            self.coverage.load()
+            return OK
+
+        elif options.action == "run":
+            return self.do_run(options, args)
 
-        if 'execute' in options.actions:
-            self.do_execute(options, args)
-
-        if 'combine' in options.actions:
-            self.coverage.combine()
+        elif options.action == "combine":
+            self.coverage.load()
+            data_dirs = args or None
+            self.coverage.combine(data_dirs)
             self.coverage.save()
+            return OK
 
         # Remaining actions are reporting, with some common options.
         report_args = dict(
-            morfs = args,
+            morfs = unglob_args(args),
             ignore_errors = options.ignore_errors,
             omit = omit,
             include = include,
             )
 
-        if 'report' in options.actions:
+        self.coverage.load()
+
+        total = None
+        if options.action == "report":
             total = self.coverage.report(
-                show_missing=options.show_missing, **report_args)
-        if 'annotate' in options.actions:
+                show_missing=options.show_missing,
+                skip_covered=options.skip_covered, **report_args)
+        elif options.action == "annotate":
             self.coverage.annotate(
                 directory=options.directory, **report_args)
-        if 'html' in options.actions:
+        elif options.action == "html":
             total = self.coverage.html_report(
                 directory=options.directory, title=options.title,
                 **report_args)
-        if 'xml' in options.actions:
+        elif options.action == "xml":
             outfile = options.outfile
             total = self.coverage.xml_report(outfile=outfile, **report_args)
 
-        if options.fail_under is not None:
-            if total >= options.fail_under:
-                return OK
-            else:
-                return FAIL_UNDER
-        else:
-            return OK
+        if total is not None:
+            # Apply the command line fail-under options, and then use the config
+            # value, so we can get fail_under from the config file.
+            if options.fail_under is not None:
+                self.coverage.set_option("report:fail_under", options.fail_under)
+
+            if self.coverage.get_option("report:fail_under"):
+
+                # Total needs to be rounded, but be careful of 0 and 100.
+                if 0 < total < 1:
+                    total = 1
+                elif 99 < total < 100:
+                    total = 99
+                else:
+                    total = round(total)
+
+                if total >= self.coverage.get_option("report:fail_under"):
+                    return OK
+                else:
+                    return FAIL_UNDER
+
+        return OK
 
     def help(self, error=None, topic=None, parser=None):
         """Display an error message, or the named topic."""
@@ -494,13 +513,13 @@
         """
         # Handle help.
         if options.help:
-            if self.classic:
+            if self.global_option:
                 self.help_fn(topic='help')
             else:
                 self.help_fn(parser=parser)
             return True
 
-        if "help" in options.actions:
+        if options.action == "help":
             if args:
                 for a in args:
                     parser = CMDS.get(a)
@@ -522,67 +541,48 @@
     def args_ok(self, options, args):
         """Check for conflicts and problems in the options.
 
-        Returns True if everything is ok, or False if not.
+        Returns True if everything is OK, or False if not.
 
         """
-        for i in ['erase', 'execute']:
-            for j in ['annotate', 'html', 'report', 'combine']:
-                if (i in options.actions) and (j in options.actions):
-                    self.help_fn("You can't specify the '%s' and '%s' "
-                              "options at the same time." % (i, j))
-                    return False
-
-        if not options.actions:
-            self.help_fn(
-                "You must specify at least one of -e, -x, -c, -r, -a, or -b."
-                )
-            return False
-        args_allowed = (
-            'execute' in options.actions or
-            'annotate' in options.actions or
-            'html' in options.actions or
-            'debug' in options.actions or
-            'report' in options.actions or
-            'xml' in options.actions
-            )
-        if not args_allowed and args:
-            self.help_fn("Unexpected arguments: %s" % " ".join(args))
-            return False
-
-        if 'execute' in options.actions and not args:
+        if options.action == "run" and not args:
             self.help_fn("Nothing to do.")
             return False
 
         return True
 
-    def do_execute(self, options, args):
+    def do_run(self, options, args):
         """Implementation of 'coverage run'."""
 
-        # Set the first path element properly.
-        old_path0 = sys.path[0]
+        if options.append and self.coverage.get_option("run:parallel"):
+            self.help_fn("Can't append to data files in parallel mode.")
+            return ERR
+
+        if not self.coverage.get_option("run:parallel"):
+            if not options.append:
+                self.coverage.erase()
 
         # Run the script.
         self.coverage.start()
         code_ran = True
         try:
-            try:
-                if options.module:
-                    sys.path[0] = ''
-                    self.run_python_module(args[0], args)
-                else:
-                    filename = args[0]
-                    sys.path[0] = os.path.abspath(os.path.dirname(filename))
-                    self.run_python_file(filename, args)
-            except NoSource:
-                code_ran = False
-                raise
+            if options.module:
+                self.run_python_module(args[0], args)
+            else:
+                filename = args[0]
+                self.run_python_file(filename, args)
+        except NoSource:
+            code_ran = False
+            raise
         finally:
             self.coverage.stop()
             if code_ran:
+                if options.append:
+                    data_file = self.coverage.get_option("run:data_file")
+                    if self.path_exists(data_file):
+                        self.coverage.combine(data_paths=[data_file])
                 self.coverage.save()
 
-            # Restore the old path
-            sys.path[0] = old_path0
+        return OK
 
     def do_debug(self, args):
         """Implementation of 'coverage debug'."""
@@ -590,27 +590,35 @@
         if not args:
             self.help_fn("What information would you like: data, sys?")
             return ERR
+
         for info in args:
             if info == 'sys':
-                print("-- sys ----------------------------------------")
-                for line in info_formatter(self.coverage.sysinfo()):
+                sys_info = self.coverage.sys_info()
+                print(info_header("sys"))
+                for line in info_formatter(sys_info):
                     print(" %s" % line)
             elif info == 'data':
-                print("-- data ---------------------------------------")
                 self.coverage.load()
-                print("path: %s" % self.coverage.data.filename)
-                print("has_arcs: %r" % self.coverage.data.has_arcs())
-                summary = self.coverage.data.summary(fullpath=True)
-                if summary:
+                data = self.coverage.data
+                print(info_header("data"))
+                print("path: %s" % self.coverage.data_files.filename)
+                if data:
+                    print("has_arcs: %r" % data.has_arcs())
+                    summary = data.line_counts(fullpath=True)
                     filenames = sorted(summary.keys())
                     print("\n%d files:" % len(filenames))
                     for f in filenames:
-                        print("%s: %d lines" % (f, summary[f]))
+                        line = "%s: %d lines" % (f, summary[f])
+                        plugin = data.file_tracer(f)
+                        if plugin:
+                            line += " [%s]" % plugin
+                        print(line)
                 else:
                     print("No data collected")
             else:
                 self.help_fn("Don't know what you mean by %r" % info)
                 return ERR
+
         return OK
 
 
@@ -618,64 +626,30 @@
     """Turn a command-line argument into a list."""
     if not s:
         return None
-    if sys.platform == 'win32':
-        # When running coverage as coverage.exe, some of the behavior
+    if env.WINDOWS:
+        # When running coverage.py as coverage.exe, some of the behavior
         # of the shell is emulated: wildcards are expanded into a list of
-        # filenames.  So you have to single-quote patterns on the command
+        # file names.  So you have to single-quote patterns on the command
         # line, but (not) helpfully, the single quotes are included in the
         # argument, so we have to strip them off here.
         s = s.strip("'")
     return s.split(',')
 
 
-HELP_TOPICS = {
-# -------------------------
-'classic':
-r"""Coverage.py version %(__version__)s
-Measure, collect, and report on code coverage in Python programs.
-
-Usage:
-
-coverage -x [-p] [-L] [--timid] MODULE.py [ARG1 ARG2 ...]
-    Execute the module, passing the given command-line arguments, collecting
-    coverage data.  With the -p option, include the machine name and process
-    id in the .coverage file name.  With -L, measure coverage even inside the
-    Python installed library, which isn't done by default.  With --timid, use a
-    simpler but slower trace method.
-
-coverage -e
-    Erase collected coverage data.
-
-coverage -c
-    Combine data from multiple coverage files (as created by -p option above)
-    and store it into a single file representing the union of the coverage.
+def unglob_args(args):
+    """Interpret shell wildcards for platforms that need it."""
+    if env.WINDOWS:
+        globbed = []
+        for arg in args:
+            if '?' in arg or '*' in arg:
+                globbed.extend(glob.glob(arg))
+            else:
+                globbed.append(arg)
+        args = globbed
+    return args
 
-coverage -r [-m] [-i] [-o DIR,...] [FILE1 FILE2 ...]
-    Report on the statement coverage for the given files.  With the -m
-    option, show line numbers of the statements that weren't executed.
 
-coverage -b -d DIR [-i] [-o DIR,...] [FILE1 FILE2 ...]
-    Create an HTML report of the coverage of the given files.  Each file gets
-    its own page, with the file listing decorated to show executed, excluded,
-    and missed lines.
-
-coverage -a [-d DIR] [-i] [-o DIR,...] [FILE1 FILE2 ...]
-    Make annotated copies of the given files, marking statements that
-    are executed with > and statements that are missed with !.
-
--d DIR
-    Write output files for -b or -a to this directory.
-
--i  Ignore errors while reporting or annotating.
-
--o DIR,...
-    Omit reporting or annotating files when their filename path starts with
-    a directory listed in the omit list.
-    e.g. coverage -i -r -o c:\python25,lib\enthought\traits
-
-Coverage data is saved in the file .coverage by default.  Set the
-COVERAGE_FILE environment variable to save it somewhere else.
-""",
+HELP_TOPICS = {
 # -------------------------
 'help': """\
 Coverage.py, version %(__version__)s
@@ -694,8 +668,7 @@
     xml         Create an XML report of coverage results.
 
 Use "coverage help <command>" for detailed help on any command.
-Use "coverage help classic" for help on older command syntax.
-For more information, see %(__url__)s
+For full documentation, see %(__url__)s
 """,
 # -------------------------
 'minimum_help': """\
@@ -703,13 +676,14 @@
 """,
 # -------------------------
 'version': """\
-Coverage.py, version %(__version__)s.  %(__url__)s
+Coverage.py, version %(__version__)s.
+Documentation at %(__url__)s
 """,
 }
 
 
 def main(argv=None):
-    """The main entry point to Coverage.
+    """The main entry point to coverage.py.
 
     This is installed as the script entry point.
 
@@ -717,26 +691,19 @@
     if argv is None:
         argv = sys.argv[1:]
     try:
-        start = time.clock()
         status = CoverageScript().command_line(argv)
-        end = time.clock()
-        if 0:
-            print("time: %.3fs" % (end - start))
-    except ExceptionDuringRun:
+    except ExceptionDuringRun as err:
         # An exception was caught while running the product code.  The
         # sys.exc_info() return tuple is packed into an ExceptionDuringRun
         # exception.
-        _, err, _ = sys.exc_info()
         traceback.print_exception(*err.args)
         status = ERR
-    except CoverageException:
+    except CoverageException as err:
         # A controlled error inside coverage.py: print the message to the user.
-        _, err, _ = sys.exc_info()
         print(err)
         status = ERR
-    except SystemExit:
+    except SystemExit as err:
         # The user called `sys.exit()`.  Exit with their argument, if any.
-        _, err, _ = sys.exc_info()
         if err.args:
             status = err.args[0]
         else:
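
The reworked cmdline keeps main(argv) as the programmatic entry point for the new sub-command style. A minimal sketch of driving it from Python instead of the console script (the target script name is hypothetical):

import sys

from coverage.cmdline import main

# Roughly equivalent to:  coverage run example.py  followed by  coverage report -m
status = main(["run", "example.py"])
if status == 0:
    status = main(["report", "-m"])
sys.exit(status)
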
--- a/DebugClients/Python/coverage/codeunit.py	Sat Oct 10 12:06:10 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,152 +0,0 @@
-"""Code unit (module) handling for Coverage."""
-
-import glob, os, sys
-
-from .backward import open_source, string_class, StringIO
-from .misc import CoverageException
-
-
-def code_unit_factory(morfs, file_locator):
-    """Construct a list of CodeUnits from polymorphic inputs.
-
-    `morfs` is a module or a filename, or a list of same.
-
-    `file_locator` is a FileLocator that can help resolve filenames.
-
-    Returns a list of CodeUnit objects.
-
-    """
-    # Be sure we have a list.
-    if not isinstance(morfs, (list, tuple)):
-        morfs = [morfs]
-
-    # On Windows, the shell doesn't expand wildcards.  Do it here.
-    globbed = []
-    for morf in morfs:
-        if isinstance(morf, string_class) and ('?' in morf or '*' in morf):
-            globbed.extend(glob.glob(morf))
-        else:
-            globbed.append(morf)
-    morfs = globbed
-
-    code_units = [CodeUnit(morf, file_locator) for morf in morfs]
-
-    return code_units
-
-
-class CodeUnit(object):
-    """Code unit: a filename or module.
-
-    Instance attributes:
-
-    `name` is a human-readable name for this code unit.
-    `filename` is the os path from which we can read the source.
-    `relative` is a boolean.
-
-    """
-    def __init__(self, morf, file_locator):
-        self.file_locator = file_locator
-
-        if hasattr(morf, '__file__'):
-            f = morf.__file__
-        else:
-            f = morf
-        # .pyc files should always refer to a .py instead.
-        if f.endswith('.pyc') or f.endswith('.pyo'):
-            f = f[:-1]
-        elif f.endswith('$py.class'): # Jython
-            f = f[:-9] + ".py"
-        self.filename = self.file_locator.canonical_filename(f)
-        if isinstance(self.filename, unicode):
-            self.filename = self.filename.encode(sys.getfilesystemencoding())
-
-        if hasattr(morf, '__name__'):
-            n = modname = morf.__name__
-            self.relative = True
-        else:
-            n = os.path.splitext(morf)[0]
-            rel = self.file_locator.relative_filename(n)
-            if isinstance(rel, unicode):
-                rel = rel.encode(sys.getfilesystemencoding())
-            if os.path.isabs(n):
-                self.relative = (rel != n)
-            else:
-                self.relative = True
-            n = rel
-            modname = None
-        self.name = n
-        self.modname = modname
-
-    def __repr__(self):
-        return "<CodeUnit name=%r filename=%r>" % (self.name, self.filename)
-
-    # Annoying comparison operators. Py3k wants __lt__ etc, and Py2k needs all
-    # of them defined.
-
-    def __lt__(self, other):
-        return self.name < other.name
-    def __le__(self, other):
-        return self.name <= other.name
-    def __eq__(self, other):
-        return self.name == other.name
-    def __ne__(self, other):
-        return self.name != other.name
-    def __gt__(self, other):
-        return self.name > other.name
-    def __ge__(self, other):
-        return self.name >= other.name
-
-    def flat_rootname(self):
-        """A base for a flat filename to correspond to this code unit.
-
-        Useful for writing files about the code where you want all the files in
-        the same directory, but need to differentiate same-named files from
-        different directories.
-
-        For example, the file a/b/c.py might return 'a_b_c'
-
-        """
-        if self.modname:
-            return self.modname.replace('.', '_')
-        else:
-            root = os.path.splitdrive(self.name)[1]
-            return root.replace('\\', '_').replace('/', '_').replace('.', '_')
-
-    def source_file(self):
-        """Return an open file for reading the source of the code unit."""
-        if os.path.exists(self.filename):
-            # A regular text file: open it.
-            return open_source(self.filename)
-
-        # Maybe it's in a zip file?
-        source = self.file_locator.get_zip_data(self.filename)
-        if source is not None:
-            return StringIO(source)
-
-        # Couldn't find source.
-        raise CoverageException(
-            "No source for code '%s'." % self.filename
-            )
-
-    def should_be_python(self):
-        """Does it seem like this file should contain Python?
-
-        This is used to decide if a file reported as part of the execution of
-        a program was really likely to have contained Python in the first
-        place.
-
-        """
-        # Get the file extension.
-        _, ext = os.path.splitext(self.filename)
-
-        # Anything named *.py* should be Python.
-        if ext.startswith('.py'):
-            return True
-        # A file with no extension should be Python.
-        if not ext:
-            return True
-        # Everything else is probably not Python.
-        return False
-
-#
-# eflag: FileType = Python2
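The deleted codeunit.py above documents flat_rootname() as mapping a/b/c.py to 'a_b_c'. A small standalone sketch of that transformation (the function name is kept from the removed code, but this is an illustration, not the patched implementation):

    import os

    def flat_rootname(name):
        """Collapse a path (already stripped of its extension) into a flat,
        directory-free name suitable for a per-file output file."""
        root = os.path.splitdrive(name)[1]
        return root.replace('\\', '_').replace('/', '_').replace('.', '_')

    # flat_rootname(os.path.splitext("a/b/c.py")[0]) == "a_b_c"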
--- a/DebugClients/Python/coverage/collector.py	Sat Oct 10 12:06:10 2015 +0200
+++ b/DebugClients/Python/coverage/collector.py	Sat Oct 10 12:44:52 2015 +0200
@@ -1,152 +1,36 @@
-"""Raw data collector for Coverage."""
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
+"""Raw data collector for coverage.py."""
 
-import os, sys, threading
+import os, sys
+
+from coverage import env
+from coverage.backward import iitems
+from coverage.files import abs_file
+from coverage.misc import CoverageException
+from coverage.pytracer import PyTracer
 
 try:
     # Use the C extension code when we can, for speed.
-    from .tracer import CTracer         # pylint: disable=F0401,E0611
+    from coverage.tracer import CTracer, CFileDisposition   # pylint: disable=no-name-in-module
 except ImportError:
     # Couldn't import the C extension, maybe it isn't built.
     if os.getenv('COVERAGE_TEST_TRACER') == 'c':
-        # During testing, we use the COVERAGE_TEST_TRACER env var to indicate
-        # that we've fiddled with the environment to test this fallback code.
-        # If we thought we had a C tracer, but couldn't import it, then exit
-        # quickly and clearly instead of dribbling confusing errors. I'm using
-        # sys.exit here instead of an exception because an exception here
-        # causes all sorts of other noise in unittest.
-        sys.stderr.write(
-            "*** COVERAGE_TEST_TRACER is 'c' but can't import CTracer!\n"
-            )
+        # During testing, we use the COVERAGE_TEST_TRACER environment variable
+        # to indicate that we've fiddled with the environment to test this
+        # fallback code.  If we thought we had a C tracer, but couldn't import
+        # it, then exit quickly and clearly instead of dribbling confusing
+        # errors. I'm using sys.exit here instead of an exception because an
+        # exception here causes all sorts of other noise in unittest.
+        sys.stderr.write("*** COVERAGE_TEST_TRACER is 'c' but can't import CTracer!\n")
         sys.exit(1)
     CTracer = None
 
 
-class PyTracer(object):
-    """Python implementation of the raw data tracer."""
-
-    # Because of poor implementations of trace-function-manipulating tools,
-    # the Python trace function must be kept very simple.  In particular, there
-    # must be only one function ever set as the trace function, both through
-    # sys.settrace, and as the return value from the trace function.  Put
-    # another way, the trace function must always return itself.  It cannot
-    # swap in other functions, or return None to avoid tracing a particular
-    # frame.
-    #
-    # The trace manipulator that introduced this restriction is DecoratorTools,
-    # which sets a trace function, and then later restores the pre-existing one
-    # by calling sys.settrace with a function it found in the current frame.
-    #
-    # Systems that use DecoratorTools (or similar trace manipulations) must use
-    # PyTracer to get accurate results.  The command-line --timid argument is
-    # used to force the use of this tracer.
-
-    def __init__(self):
-        self.data = None
-        self.should_trace = None
-        self.should_trace_cache = None
-        self.warn = None
-        self.cur_file_data = None
-        self.last_line = 0
-        self.data_stack = []
-        self.last_exc_back = None
-        self.last_exc_firstlineno = 0
-        self.arcs = False
-        self.thread = None
-        self.stopped = False
-
-    def _trace(self, frame, event, arg_unused):
-        """The trace function passed to sys.settrace."""
-
-        if self.stopped:
-            return
-
-        if 0:
-            sys.stderr.write("trace event: %s %r @%d\n" % (
-                event, frame.f_code.co_filename, frame.f_lineno
-            ))
-
-        if self.last_exc_back:
-            if frame == self.last_exc_back:
-                # Someone forgot a return event.
-                if self.arcs and self.cur_file_data:
-                    pair = (self.last_line, -self.last_exc_firstlineno)
-                    self.cur_file_data[pair] = None
-                self.cur_file_data, self.last_line = self.data_stack.pop()
-            self.last_exc_back = None
-
-        if event == 'call':
-            # Entering a new function context.  Decide if we should trace
-            # in this file.
-            self.data_stack.append((self.cur_file_data, self.last_line))
-            filename = frame.f_code.co_filename
-            if filename not in self.should_trace_cache:
-                tracename = self.should_trace(filename, frame)
-                self.should_trace_cache[filename] = tracename
-            else:
-                tracename = self.should_trace_cache[filename]
-            #print("called, stack is %d deep, tracename is %r" % (
-            #               len(self.data_stack), tracename))
-            if tracename:
-                if tracename not in self.data:
-                    self.data[tracename] = {}
-                self.cur_file_data = self.data[tracename]
-            else:
-                self.cur_file_data = None
-            # Set the last_line to -1 because the next arc will be entering a
-            # code block, indicated by (-1, n).
-            self.last_line = -1
-        elif event == 'line':
-            # Record an executed line.
-            if self.cur_file_data is not None:
-                if self.arcs:
-                    #print("lin", self.last_line, frame.f_lineno)
-                    self.cur_file_data[(self.last_line, frame.f_lineno)] = None
-                else:
-                    #print("lin", frame.f_lineno)
-                    self.cur_file_data[frame.f_lineno] = None
-            self.last_line = frame.f_lineno
-        elif event == 'return':
-            if self.arcs and self.cur_file_data:
-                first = frame.f_code.co_firstlineno
-                self.cur_file_data[(self.last_line, -first)] = None
-            # Leaving this function, pop the filename stack.
-            self.cur_file_data, self.last_line = self.data_stack.pop()
-            #print("returned, stack is %d deep" % (len(self.data_stack)))
-        elif event == 'exception':
-            #print("exc", self.last_line, frame.f_lineno)
-            self.last_exc_back = frame.f_back
-            self.last_exc_firstlineno = frame.f_code.co_firstlineno
-        return self._trace
-
-    def start(self):
-        """Start this Tracer.
-
-        Return a Python function suitable for use with sys.settrace().
-
-        """
-        self.thread = threading.currentThread()
-        sys.settrace(self._trace)
-        return self._trace
-
-    def stop(self):
-        """Stop this Tracer."""
-        self.stopped = True
-        if self.thread != threading.currentThread():
-            # Called on a different thread than started us: we can't unhook
-            # ourselves, but we've set the flag that we should stop, so we won't
-            # do any more tracing.
-            return
-
-        if hasattr(sys, "gettrace") and self.warn:
-            if sys.gettrace() != self._trace:
-                msg = "Trace function changed, measurement is likely wrong: %r"
-                self.warn(msg % (sys.gettrace(),))
-        #print("Stopping tracer on %s" % threading.current_thread().ident)
-        sys.settrace(None)
-
-    def get_stats(self):
-        """Return a dictionary of statistics, or None."""
-        return None
+class FileDisposition(object):
+    """A simple value type for recording what to do with a file."""
+    pass
 
 
 class Collector(object):
@@ -170,12 +54,14 @@
     # the top, and resumed when they become the top again.
     _collectors = []
 
-    def __init__(self, should_trace, timid, branch, warn):
+    def __init__(self, should_trace, check_include, timid, branch, warn, concurrency):
         """Create a collector.
 
-        `should_trace` is a function, taking a filename, and returning a
-        canonicalized filename, or None depending on whether the file should
-        be traced or not.
+        `should_trace` is a function, taking a file name, and returning a
+        `coverage.FileDisposition` object.
+
+        `check_include` is a function taking a file name and a frame. It returns
+        a boolean: True if the file should be traced, False if not.
 
         If `timid` is true, then a slower simpler trace function will be
         used.  This is important for some environments where manipulation of
@@ -189,10 +75,43 @@
         `warn` is a warning function, taking a single string message argument,
         to be used if a warning needs to be issued.
 
+        `concurrency` is a string indicating the concurrency library in use.
+        Valid values are "greenlet", "eventlet", "gevent", or "thread" (the
+        default).
+
         """
         self.should_trace = should_trace
+        self.check_include = check_include
         self.warn = warn
         self.branch = branch
+        self.threading = None
+        self.concurrency = concurrency
+
+        self.concur_id_func = None
+
+        try:
+            if concurrency == "greenlet":
+                import greenlet
+                self.concur_id_func = greenlet.getcurrent
+            elif concurrency == "eventlet":
+                import eventlet.greenthread     # pylint: disable=import-error,useless-suppression
+                self.concur_id_func = eventlet.greenthread.getcurrent
+            elif concurrency == "gevent":
+                import gevent                   # pylint: disable=import-error,useless-suppression
+                self.concur_id_func = gevent.getcurrent
+            elif concurrency == "thread" or not concurrency:
+                # It's important to import threading only if we need it.  If
+                # it's imported early, and the program being measured uses
+                # gevent, then gevent's monkey-patching won't work properly.
+                import threading
+                self.threading = threading
+            else:
+                raise CoverageException("Don't understand concurrency=%s" % concurrency)
+        except ImportError:
+            raise CoverageException(
+                "Couldn't trace with concurrency=%s, the module isn't installed." % concurrency
+            )
+
         self.reset()
 
         if timid:
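The Collector constructor hunk above selects a "current context" function for the configured concurrency library, importing it lazily so unused libraries are never touched. A hedged sketch of that dispatch as a standalone function; get_concur_id_func is a made-up name, and each import only succeeds if the library happens to be installed:

    def get_concur_id_func(concurrency):
        """Return a callable that identifies the current greenlet/coroutine,
        or None when plain threads are in use."""
        if concurrency == "greenlet":
            import greenlet
            return greenlet.getcurrent
        elif concurrency == "eventlet":
            import eventlet.greenthread
            return eventlet.greenthread.getcurrent
        elif concurrency == "gevent":
            import gevent
            return gevent.getcurrent
        elif concurrency == "thread" or not concurrency:
            return None                      # default: rely on threading
        else:
            raise ValueError("Don't understand concurrency=%s" % concurrency)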
@@ -203,8 +122,15 @@
             # trace function.
             self._trace_class = CTracer or PyTracer
 
+        if self._trace_class is CTracer:
+            self.file_disposition_class = CFileDisposition
+            self.supports_plugins = True
+        else:
+            self.file_disposition_class = FileDisposition
+            self.supports_plugins = False
+
     def __repr__(self):
-        return "<Collector at 0x%x>" % id(self)
+        return "<Collector at 0x%x: %s>" % (id(self), self.tracer_name())
 
     def tracer_name(self):
         """Return the class name of the tracer we're using."""
@@ -212,14 +138,42 @@
 
     def reset(self):
         """Clear collected data, and prepare to collect more."""
-        # A dictionary mapping filenames to dicts with linenumber keys,
-        # or mapping filenames to dicts with linenumber pairs as keys.
+        # A dictionary mapping file names to dicts with line number keys (if not
+        # branch coverage), or mapping file names to dicts with line number
+        # pairs as keys (if branch coverage).
         self.data = {}
 
-        # A cache of the results from should_trace, the decision about whether
-        # to trace execution in a file. A dict of filename to (filename or
-        # None).
-        self.should_trace_cache = {}
+        # A dictionary mapping file names to file tracer plugin names that will
+        # handle them.
+        self.file_tracers = {}
+
+        # The .should_trace_cache attribute is a cache from file names to
+        # coverage.FileDisposition objects, or None.  When a file is first
+        # considered for tracing, a FileDisposition is obtained from
+        # Coverage.should_trace.  Its .trace attribute indicates whether the
+        # file should be traced or not.  If it should be, a plugin with dynamic
+        # file names can decide not to trace it based on the dynamic file name
+        # being excluded by the inclusion rules, in which case the
+        # FileDisposition will be replaced by None in the cache.
+        if env.PYPY:
+            import __pypy__                     # pylint: disable=import-error
+            # Alex Gaynor said:
+            # should_trace_cache is a strictly growing key: once a key is in
+            # it, it never changes.  Further, the keys used to access it are
+            # generally constant, given sufficient context. That is to say, at
+            # any given point _trace() is called, pypy is able to know the key.
+            # This is because the key is determined by the physical source code
+            # line, and that's invariant with the call site.
+            #
+            # This property of a dict with immutable keys, combined with
+            # call-site-constant keys is a match for PyPy's module dict,
+            # which is optimized for such workloads.
+            #
+            # This gives a 20% benefit on the workload described at
+            # https://bitbucket.org/pypy/pypy/issue/1871/10x-slower-than-cpython-under-coverage
+            self.should_trace_cache = __pypy__.newdict("module")
+        else:
+            self.should_trace_cache = {}
 
         # Our active Tracers.
         self.tracers = []
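The reset() hunk above swaps the should_trace cache for a PyPy "module" dict when running on PyPy, because the cache's keys only ever grow and are call-site constant. A tiny sketch of the same idea, using a try/except import in place of coverage's env.PYPY flag (that substitution is mine):

    try:
        import __pypy__                      # only importable on PyPy
        # Module-dict strategy: optimized for dicts whose keys are added
        # once and looked up from constant call sites.
        should_trace_cache = __pypy__.newdict("module")
    except ImportError:
        should_trace_cache = {}              # ordinary dict on CPython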
@@ -228,12 +182,30 @@
         """Start a new Tracer object, and store it in self.tracers."""
         tracer = self._trace_class()
         tracer.data = self.data
-        tracer.arcs = self.branch
+        tracer.trace_arcs = self.branch
         tracer.should_trace = self.should_trace
         tracer.should_trace_cache = self.should_trace_cache
         tracer.warn = self.warn
+
+        if hasattr(tracer, 'concur_id_func'):
+            tracer.concur_id_func = self.concur_id_func
+        elif self.concur_id_func:
+            raise CoverageException(
+                "Can't support concurrency=%s with %s, only threads are supported" % (
+                    self.concurrency, self.tracer_name(),
+                )
+            )
+
+        if hasattr(tracer, 'file_tracers'):
+            tracer.file_tracers = self.file_tracers
+        if hasattr(tracer, 'threading'):
+            tracer.threading = self.threading
+        if hasattr(tracer, 'check_include'):
+            tracer.check_include = self.check_include
+
         fn = tracer.start()
         self.tracers.append(tracer)
+
         return fn
 
     # The trace function has to be set individually on each thread before
@@ -242,16 +214,16 @@
     # install this as a trace function, and the first time it's called, it does
     # the real trace installation.
 
-    def _installation_trace(self, frame_unused, event_unused, arg_unused):
+    def _installation_trace(self, frame, event, arg):
         """Called on new threads, installs the real tracer."""
-        # Remove ourselves as the trace function
+        # Remove ourselves as the trace function.
         sys.settrace(None)
         # Install the real tracer.
         fn = self._start_tracer()
         # Invoke the real trace function with the current event, to be sure
         # not to lose an event.
         if fn:
-            fn = fn(frame_unused, event_unused, arg_unused)
+            fn = fn(frame, event, arg)
         # Return the new trace function to continue tracing in this scope.
         return fn
 
@@ -259,41 +231,47 @@
         """Start collecting trace information."""
         if self._collectors:
             self._collectors[-1].pause()
-        self._collectors.append(self)
-        #print("Started: %r" % self._collectors, file=sys.stderr)
 
-        # Check to see whether we had a fullcoverage tracer installed.
+        # Check to see whether we had a fullcoverage tracer installed. If so,
+        # get the stack frames it stashed away for us.
         traces0 = []
-        if hasattr(sys, "gettrace"):
-            fn0 = sys.gettrace()
-            if fn0:
-                tracer0 = getattr(fn0, '__self__', None)
-                if tracer0:
-                    traces0 = getattr(tracer0, 'traces', [])
+        fn0 = sys.gettrace()
+        if fn0:
+            tracer0 = getattr(fn0, '__self__', None)
+            if tracer0:
+                traces0 = getattr(tracer0, 'traces', [])
 
-        # Install the tracer on this thread.
-        fn = self._start_tracer()
+        try:
+            # Install the tracer on this thread.
+            fn = self._start_tracer()
+        except:
+            if self._collectors:
+                self._collectors[-1].resume()
+            raise
 
+        # If _start_tracer succeeded, then we add ourselves to the global
+        # stack of collectors.
+        self._collectors.append(self)
+
+        # Replay all the events from fullcoverage into the new trace function.
         for args in traces0:
             (frame, event, arg), lineno = args
             try:
                 fn(frame, event, arg, lineno=lineno)
             except TypeError:
-                raise Exception(
-                    "fullcoverage must be run with the C trace function."
-                )
+                raise Exception("fullcoverage must be run with the C trace function.")
 
         # Install our installation tracer in threading, to jump start other
         # threads.
-        threading.settrace(self._installation_trace)
+        if self.threading:
+            self.threading.settrace(self._installation_trace)
 
     def stop(self):
         """Stop collecting trace information."""
-        #print("Stopping: %r" % self._collectors, file=sys.stderr)
-        if not self._collectors:
-            return
         assert self._collectors
-        assert self._collectors[-1] is self
+        assert self._collectors[-1] is self, (
+            "Expected current collector to be %r, but it's %r" % (self, self._collectors[-1])
+        )
 
         self.pause()
         self.tracers = []
@@ -313,46 +291,35 @@
                 print("\nCoverage.py tracer stats:")
                 for k in sorted(stats.keys()):
                     print("%16s: %s" % (k, stats[k]))
-        threading.settrace(None)
+        if self.threading:
+            self.threading.settrace(None)
 
     def resume(self):
         """Resume tracing after a `pause`."""
         for tracer in self.tracers:
             tracer.start()
-        threading.settrace(self._installation_trace)
+        if self.threading:
+            self.threading.settrace(self._installation_trace)
+        else:
+            self._start_tracer()
 
-    def get_line_data(self):
-        """Return the line data collected.
+    def save_data(self, covdata):
+        """Save the collected data to a `CoverageData`.
 
-        Data is { filename: { lineno: None, ...}, ...}
+        Also resets the collector.
 
         """
-        if self.branch:
-            # If we were measuring branches, then we have to re-build the dict
-            # to show line data.
-            line_data = {}
-            for f, arcs in self.data.items():
-                line_data[f] = ldf = {}
-                for l1, _ in list(arcs.keys()):
-                    if l1:
-                        ldf[l1] = None
-            return line_data
-        else:
-            return self.data
+        def abs_file_dict(d):
+            """Return a dict like d, but with keys modified by `abs_file`."""
+            return dict((abs_file(k), v) for k, v in iitems(d))
 
-    def get_arc_data(self):
-        """Return the arc data collected.
-
-        Data is { filename: { (l1, l2): None, ...}, ...}
+        if self.branch:
+            covdata.add_arcs(abs_file_dict(self.data))
+        else:
+            covdata.add_lines(abs_file_dict(self.data))
+        covdata.add_file_tracers(abs_file_dict(self.file_tracers))
 
-        Note that no data is collected or returned if the Collector wasn't
-        created with `branch` true.
-
-        """
-        if self.branch:
-            return self.data
-        else:
-            return {}
+        self.reset()
 
 #
 # eflag: FileType = Python2
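save_data() above rewrites the collected dictionaries so that their keys are absolute file names before handing them to CoverageData. A minimal sketch of that key-rewriting helper, with os.path.abspath standing in for coverage's abs_file():

    import os

    def abs_file_dict(d, canonicalize=os.path.abspath):
        """Return a dict like `d`, but with every key passed through `canonicalize`."""
        return dict((canonicalize(k), v) for k, v in d.items())

    # abs_file_dict({"pkg/mod.py": {1: None, 7: None}}) keys the same line
    # data by the absolute path of pkg/mod.py.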
--- a/DebugClients/Python/coverage/config.py	Sat Oct 10 12:06:10 2015 +0200
+++ b/DebugClients/Python/coverage/config.py	Sat Oct 10 12:44:52 2015 +0200
@@ -1,31 +1,66 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
 """Config file for coverage.py"""
 
-import os, re, sys
-from .backward import string_class, iitems
+import collections
+import os
+import re
+import sys
 
-# In py3, # ConfigParser was renamed to the more-standard configparser
-try:
-    import configparser                             # pylint: disable=F0401
-except ImportError:
-    import ConfigParser as configparser
+from coverage.backward import configparser, iitems, string_class
+from coverage.misc import CoverageException
 
 
 class HandyConfigParser(configparser.RawConfigParser):
     """Our specialization of ConfigParser."""
 
+    def __init__(self, section_prefix):
+        configparser.RawConfigParser.__init__(self)
+        self.section_prefix = section_prefix
+
     def read(self, filename):
-        """Read a filename as UTF-8 configuration data."""
+        """Read a file name as UTF-8 configuration data."""
         kwargs = {}
         if sys.version_info >= (3, 2):
             kwargs['encoding'] = "utf-8"
         return configparser.RawConfigParser.read(self, filename, **kwargs)
 
-    def get(self, *args, **kwargs):
-        v = configparser.RawConfigParser.get(self, *args, **kwargs)
+    def has_option(self, section, option):
+        section = self.section_prefix + section
+        return configparser.RawConfigParser.has_option(self, section, option)
+
+    def has_section(self, section):
+        section = self.section_prefix + section
+        return configparser.RawConfigParser.has_section(self, section)
+
+    def options(self, section):
+        section = self.section_prefix + section
+        return configparser.RawConfigParser.options(self, section)
+
+    def get_section(self, section):
+        """Get the contents of a section, as a dictionary."""
+        d = {}
+        for opt in self.options(section):
+            d[opt] = self.get(section, opt)
+        return d
+
+    def get(self, section, *args, **kwargs):
+        """Get a value, replacing environment variables also.
+
+        The arguments are the same as `RawConfigParser.get`, but in the found
+        value, ``$WORD`` or ``${WORD}`` are replaced by the value of the
+        environment variable ``WORD``.
+
+        Returns the finished value.
+
+        """
+        section = self.section_prefix + section
+        v = configparser.RawConfigParser.get(self, section, *args, **kwargs)
         def dollar_replace(m):
             """Called for each $replacement."""
             # Only one of the groups will have matched, just get its text.
-            word = [w for w in m.groups() if w is not None][0]
+            word = next(w for w in m.groups() if w is not None)     # pragma: part covered
             if word == "$":
                 return "$"
             else:
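The HandyConfigParser.get() hunk above expands $WORD and ${WORD} references to environment variables inside configuration values, with $$ producing a literal dollar sign. A self-contained sketch of that substitution; the regular expression here is written from the described behaviour rather than copied from coverage.py, so treat it as an approximation:

    import os
    import re

    def expand_env(value):
        """Replace $WORD or ${WORD} with os.environ['WORD'], and $$ with '$'."""
        def dollar_replace(m):
            word = m.group(1) or m.group(2)      # whichever alternative matched
            if word == "$":
                return "$"
            return os.environ.get(word, "")
        return re.sub(r"\$(?:\{(\w+)\}|(\w+|\$))", dollar_replace, value)

    # expand_env("data_file = $HOME/.coverage") -> "data_file = /home/me/.coverage"
    # (assuming HOME is set to /home/me)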
@@ -59,28 +94,39 @@
                     values.append(value)
         return values
 
-    def getlinelist(self, section, option):
-        """Read a list of full-line strings.
+    def getregexlist(self, section, option):
+        """Read a list of full-line regexes.
 
         The value of `section` and `option` is treated as a newline-separated
-        list of strings.  Each value is stripped of whitespace.
+        list of regexes.  Each value is stripped of whitespace.
 
         Returns the list of strings.
 
         """
-        value_list = self.get(section, option)
-        return list(filter(None, value_list.split('\n')))
+        line_list = self.get(section, option)
+        value_list = []
+        for value in line_list.splitlines():
+            value = value.strip()
+            try:
+                re.compile(value)
+            except re.error as e:
+                raise CoverageException(
+                    "Invalid [%s].%s value %r: %s" % (section, option, value, e)
+                )
+            if value:
+                value_list.append(value)
+        return value_list
 
 
-# The default line exclusion regexes
+# The default line exclusion regexes.
 DEFAULT_EXCLUDE = [
-    '(?i)# *pragma[: ]*no *cover',
-    ]
+    r'(?i)#\s*pragma[:\s]?\s*no\s*cover',
+]
 
 # The default partial branch regexes, to be modified by the user.
 DEFAULT_PARTIAL = [
-    '(?i)# *pragma[: ]*no *branch',
-    ]
+    r'(?i)#\s*pragma[:\s]?\s*no\s*branch',
+]
 
 # The default partial branch regexes, based on Python semantics.
 # These are any Python branching constructs that can't actually execute all
@@ -88,7 +134,7 @@
 DEFAULT_PARTIAL_ALWAYS = [
     'while (True|1|False|0):',
     'if (True|1|False|0):',
-    ]
+]
 
 
 class CoverageConfig(object):
@@ -106,44 +152,44 @@
 
         # Defaults for [run]
         self.branch = False
+        self.concurrency = None
         self.cover_pylib = False
         self.data_file = ".coverage"
+        self.debug = []
+        self.note = None
         self.parallel = False
+        self.plugins = []
+        self.source = None
         self.timid = False
-        self.source = None
-        self.debug = []
 
         # Defaults for [report]
         self.exclude_list = DEFAULT_EXCLUDE[:]
+        self.fail_under = 0
         self.ignore_errors = False
         self.include = None
         self.omit = None
+        self.partial_always_list = DEFAULT_PARTIAL_ALWAYS[:]
         self.partial_list = DEFAULT_PARTIAL[:]
-        self.partial_always_list = DEFAULT_PARTIAL_ALWAYS[:]
         self.precision = 0
         self.show_missing = False
+        self.skip_covered = False
 
         # Defaults for [html]
+        self.extra_css = None
         self.html_dir = "htmlcov"
-        self.extra_css = None
         self.html_title = "Coverage report"
 
         # Defaults for [xml]
         self.xml_output = "coverage.xml"
+        self.xml_package_depth = 99
 
         # Defaults for [paths]
         self.paths = {}
 
-    def from_environment(self, env_var):
-        """Read configuration from the `env_var` environment variable."""
-        # Timidity: for nose users, read an environment variable.  This is a
-        # cheap hack, since the rest of the command line arguments aren't
-        # recognized, but it solves some users' problems.
-        env = os.environ.get(env_var, '')
-        if env:
-            self.timid = ('--timid' in env)
+        # Options for plugins
+        self.plugin_options = {}
 
-    MUST_BE_LIST = ["omit", "include", "debug"]
+    MUST_BE_LIST = ["omit", "include", "debug", "plugins"]
 
     def from_args(self, **kwargs):
         """Read config values from `kwargs`."""
@@ -153,64 +199,168 @@
                     v = [v]
                 setattr(self, k, v)
 
-    def from_file(self, filename):
+    def from_file(self, filename, section_prefix=""):
         """Read configuration from a .rc file.
 
         `filename` is a file name to read.
 
+        Returns True or False, depending on whether the file could be read.
+
         """
         self.attempted_config_files.append(filename)
 
-        cp = HandyConfigParser()
-        files_read = cp.read(filename)
-        if files_read is not None:  # return value changed in 2.4
-            self.config_files.extend(files_read)
+        cp = HandyConfigParser(section_prefix)
+        try:
+            files_read = cp.read(filename)
+        except configparser.Error as err:
+            raise CoverageException("Couldn't read config file %s: %s" % (filename, err))
+        if not files_read:
+            return False
+
+        self.config_files.extend(files_read)
+
+        try:
+            for option_spec in self.CONFIG_FILE_OPTIONS:
+                self._set_attr_from_config_option(cp, *option_spec)
+        except ValueError as err:
+            raise CoverageException("Couldn't read config file %s: %s" % (filename, err))
 
+        # Check that there are no unrecognized options.
+        all_options = collections.defaultdict(set)
         for option_spec in self.CONFIG_FILE_OPTIONS:
-            self.set_attr_from_config_option(cp, *option_spec)
+            section, option = option_spec[1].split(":")
+            all_options[section].add(option)
+
+        for section, options in iitems(all_options):
+            if cp.has_section(section):
+                for unknown in set(cp.options(section)) - options:
+                    if section_prefix:
+                        section = section_prefix + section
+                    raise CoverageException(
+                        "Unrecognized option '[%s] %s=' in config file %s" % (
+                            section, unknown, filename
+                        )
+                    )
 
         # [paths] is special
         if cp.has_section('paths'):
             for option in cp.options('paths'):
                 self.paths[option] = cp.getlist('paths', option)
 
+        # plugins can have options
+        for plugin in self.plugins:
+            if cp.has_section(plugin):
+                self.plugin_options[plugin] = cp.get_section(plugin)
+
+        return True
+
     CONFIG_FILE_OPTIONS = [
+        # These are *args for _set_attr_from_config_option:
+        #   (attr, where, type_="")
+        #
+        #   attr is the attribute to set on the CoverageConfig object.
+        #   where is the section:name to read from the configuration file.
+        #   type_ is the optional type to apply, by using .getTYPE to read the
+        #       configuration value from the file.
+
         # [run]
         ('branch', 'run:branch', 'boolean'),
+        ('concurrency', 'run:concurrency'),
         ('cover_pylib', 'run:cover_pylib', 'boolean'),
         ('data_file', 'run:data_file'),
         ('debug', 'run:debug', 'list'),
         ('include', 'run:include', 'list'),
+        ('note', 'run:note'),
         ('omit', 'run:omit', 'list'),
         ('parallel', 'run:parallel', 'boolean'),
+        ('plugins', 'run:plugins', 'list'),
         ('source', 'run:source', 'list'),
         ('timid', 'run:timid', 'boolean'),
 
         # [report]
-        ('exclude_list', 'report:exclude_lines', 'linelist'),
+        ('exclude_list', 'report:exclude_lines', 'regexlist'),
+        ('fail_under', 'report:fail_under', 'int'),
         ('ignore_errors', 'report:ignore_errors', 'boolean'),
         ('include', 'report:include', 'list'),
         ('omit', 'report:omit', 'list'),
-        ('partial_list', 'report:partial_branches', 'linelist'),
-        ('partial_always_list', 'report:partial_branches_always', 'linelist'),
+        ('partial_always_list', 'report:partial_branches_always', 'regexlist'),
+        ('partial_list', 'report:partial_branches', 'regexlist'),
         ('precision', 'report:precision', 'int'),
         ('show_missing', 'report:show_missing', 'boolean'),
+        ('skip_covered', 'report:skip_covered', 'boolean'),
 
         # [html]
+        ('extra_css', 'html:extra_css'),
         ('html_dir', 'html:directory'),
-        ('extra_css', 'html:extra_css'),
         ('html_title', 'html:title'),
 
         # [xml]
         ('xml_output', 'xml:output'),
-        ]
+        ('xml_package_depth', 'xml:package_depth', 'int'),
+    ]
 
-    def set_attr_from_config_option(self, cp, attr, where, type_=''):
+    def _set_attr_from_config_option(self, cp, attr, where, type_=''):
         """Set an attribute on self if it exists in the ConfigParser."""
         section, option = where.split(":")
         if cp.has_option(section, option):
-            method = getattr(cp, 'get'+type_)
+            method = getattr(cp, 'get' + type_)
             setattr(self, attr, method(section, option))
 
+    def get_plugin_options(self, plugin):
+        """Get a dictionary of options for the plugin named `plugin`."""
+        return self.plugin_options.get(plugin, {})
+
+    def set_option(self, option_name, value):
+        """Set an option in the configuration.
+
+        `option_name` is a colon-separated string indicating the section and
+        option name.  For example, the ``branch`` option in the ``[run]``
+        section of the config file would be indicated with `"run:branch"`.
+
+        `value` is the new value for the option.
+
+        """
+
+        # Check all the hard-coded options.
+        for option_spec in self.CONFIG_FILE_OPTIONS:
+            attr, where = option_spec[:2]
+            if where == option_name:
+                setattr(self, attr, value)
+                return
+
+        # See if it's a plugin option.
+        plugin_name, _, key = option_name.partition(":")
+        if key and plugin_name in self.plugins:
+            self.plugin_options.setdefault(plugin_name, {})[key] = value
+            return
+
+        # If we get here, we didn't find the option.
+        raise CoverageException("No such option: %r" % option_name)
+
+    def get_option(self, option_name):
+        """Get an option from the configuration.
+
+        `option_name` is a colon-separated string indicating the section and
+        option name.  For example, the ``branch`` option in the ``[run]``
+        section of the config file would be indicated with `"run:branch"`.
+
+        Returns the value of the option.
+
+        """
+
+        # Check all the hard-coded options.
+        for option_spec in self.CONFIG_FILE_OPTIONS:
+            attr, where = option_spec[:2]
+            if where == option_name:
+                return getattr(self, attr)
+
+        # See if it's a plugin option.
+        plugin_name, _, key = option_name.partition(":")
+        if key and plugin_name in self.plugins:
+            return self.plugin_options.get(plugin_name, {}).get(key)
+
+        # If we get here, we didn't find the option.
+        raise CoverageException("No such option: %r" % option_name)
+
 #
 # eflag: FileType = Python2
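The new set_option()/get_option() pair above addresses configuration values with "section:option" strings and falls back to per-plugin options keyed by plugin name. A rough usage sketch with a toy stand-in class (MiniConfig and its two options are invented for illustration):

    class MiniConfig(object):
        """Toy stand-in for CoverageConfig, showing the addressing scheme only."""
        OPTIONS = {                              # attribute -> "section:option"
            "branch": "run:branch",
            "html_dir": "html:directory",
        }

        def __init__(self):
            self.branch = False
            self.html_dir = "htmlcov"

        def set_option(self, option_name, value):
            for attr, where in self.OPTIONS.items():
                if where == option_name:
                    setattr(self, attr, value)
                    return
            raise KeyError("No such option: %r" % option_name)

        def get_option(self, option_name):
            for attr, where in self.OPTIONS.items():
                if where == option_name:
                    return getattr(self, attr)
            raise KeyError("No such option: %r" % option_name)

    cfg = MiniConfig()
    cfg.set_option("run:branch", True)
    assert cfg.get_option("run:branch") is True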
--- a/DebugClients/Python/coverage/control.py	Sat Oct 10 12:06:10 2015 +0200
+++ b/DebugClients/Python/coverage/control.py	Sat Oct 10 12:44:52 2015 +0200
@@ -1,49 +1,65 @@
-"""Core control stuff for Coverage."""
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
+"""Core control stuff for coverage.py."""
 
-import atexit, os, random, socket, sys
+import atexit
+import inspect
+import os
+import platform
+import sys
+import traceback
 
-from .annotate import AnnotateReporter
-from .backward import string_class, iitems, sorted  # pylint: disable=W0622
-from .codeunit import code_unit_factory, CodeUnit
-from .collector import Collector
-from .config import CoverageConfig
-from .data import CoverageData
-from .debug import DebugControl
-from .files import FileLocator, TreeMatcher, FnmatchMatcher
-from .files import PathAliases, find_python_files, prep_patterns
-#from .html import HtmlReporter     # Comment for eric6
-from .misc import CoverageException, bool_or_none, join_regex
-from .misc import file_be_gone
-from .results import Analysis, Numbers
-from .summary import SummaryReporter
-from .xmlreport import XmlReporter
+from coverage import env, files
+from coverage.annotate import AnnotateReporter
+from coverage.backward import string_class, iitems
+from coverage.collector import Collector
+from coverage.config import CoverageConfig
+from coverage.data import CoverageData, CoverageDataFiles
+from coverage.debug import DebugControl
+from coverage.files import TreeMatcher, FnmatchMatcher
+from coverage.files import PathAliases, find_python_files, prep_patterns
+from coverage.files import ModuleMatcher, abs_file
+from coverage.html import HtmlReporter
+from coverage.misc import CoverageException, bool_or_none, join_regex
+from coverage.misc import file_be_gone
+from coverage.monkey import patch_multiprocessing
+from coverage.plugin import FileReporter
+from coverage.plugin_support import Plugins
+from coverage.python import PythonFileReporter
+from coverage.results import Analysis, Numbers
+from coverage.summary import SummaryReporter
+from coverage.xmlreport import XmlReporter
+
 
 # Pypy has some unusual stuff in the "stdlib".  Consider those locations
 # when deciding where the stdlib is.
 try:
-    import _structseq       # pylint: disable=F0401
+    import _structseq
 except ImportError:
     _structseq = None
 
 
-class coverage(object):
+class Coverage(object):
     """Programmatic access to coverage.py.
 
     To use::
 
-        from . import coverage
+        from coverage import Coverage
 
-        cov = coverage()
+        cov = Coverage()
         cov.start()
         #.. call your code ..
         cov.stop()
         cov.html_report(directory='covhtml')
 
     """
-    def __init__(self, data_file=None, data_suffix=None, cover_pylib=None,
-                auto_data=False, timid=None, branch=None, config_file=True,
-                source=None, omit=None, include=None, debug=None,
-                debug_file=None):
+    def __init__(
+        self, data_file=None, data_suffix=None, cover_pylib=None,
+        auto_data=False, timid=None, branch=None, config_file=True,
+        source=None, omit=None, include=None, debug=None,
+        concurrency=None,
+    ):
         """
         `data_file` is the base name of the data file to use, defaulting to
         ".coverage".  `data_suffix` is appended (with a dot) to `data_file` to
@@ -65,324 +81,253 @@
         If `branch` is true, then branch coverage will be measured in addition
         to the usual statement coverage.
 
-        `config_file` determines what config file to read.  If it is a string,
-        it is the name of the config file to read.  If it is True, then a
-        standard file is read (".coveragerc").  If it is False, then no file is
-        read.
+        `config_file` determines what configuration file to read:
+
+            * If it is ".coveragerc", it is interpreted as if it were True,
+              for backward compatibility.
+
+            * If it is a string, it is the name of the file to read.  If the
+              file can't be read, it is an error.
+
+            * If it is True, then a few standard file names are tried
+              (".coveragerc", "setup.cfg").  It is not an error for these files
+              to not be found.
+
+            * If it is False, then no configuration file is read.
 
         `source` is a list of file paths or package names.  Only code located
         in the trees indicated by the file paths or package names will be
         measured.
 
-        `include` and `omit` are lists of filename patterns. Files that match
+        `include` and `omit` are lists of file name patterns. Files that match
         `include` will be measured, files that match `omit` will not.  Each
         will also accept a single string argument.
 
         `debug` is a list of strings indicating what debugging information is
-        desired. `debug_file` is the file to write debug messages to,
-        defaulting to stderr.
+        desired.
+
+        `concurrency` is a string indicating the concurrency library being used
+        in the measured code.  Without this, coverage.py will get incorrect
+        results.  Valid strings are "greenlet", "eventlet", "gevent", or
+        "thread" (the default).
+
+        .. versionadded:: 4.0
+            The `concurrency` parameter.
 
         """
-        from . import __version__
-
-        # A record of all the warnings that have been issued.
-        self._warnings = []
-
         # Build our configuration from a number of sources:
         # 1: defaults:
         self.config = CoverageConfig()
 
-        # 2: from the coveragerc file:
+        # 2: from the rcfile, .coveragerc or setup.cfg file:
         if config_file:
-            if config_file is True:
+            did_read_rc = False
+            # Some API users were specifying ".coveragerc" to mean the same as
+            # True, so make it so.
+            if config_file == ".coveragerc":
+                config_file = True
+            specified_file = (config_file is not True)
+            if not specified_file:
                 config_file = ".coveragerc"
-            try:
-                self.config.from_file(config_file)
-            except ValueError:
-                _, err, _ = sys.exc_info()
-                raise CoverageException(
-                    "Couldn't read config file %s: %s" % (config_file, err)
-                    )
+
+            did_read_rc = self.config.from_file(config_file)
+
+            if not did_read_rc:
+                if specified_file:
+                    raise CoverageException(
+                        "Couldn't read '%s' as a config file" % config_file
+                        )
+                self.config.from_file("setup.cfg", section_prefix="coverage:")
 
         # 3: from environment variables:
-        self.config.from_environment('COVERAGE_OPTIONS')
         env_data_file = os.environ.get('COVERAGE_FILE')
         if env_data_file:
             self.config.data_file = env_data_file
+        debugs = os.environ.get('COVERAGE_DEBUG')
+        if debugs:
+            self.config.debug.extend(debugs.split(","))
 
         # 4: from constructor arguments:
         self.config.from_args(
             data_file=data_file, cover_pylib=cover_pylib, timid=timid,
             branch=branch, parallel=bool_or_none(data_suffix),
             source=source, omit=omit, include=include, debug=debug,
+            concurrency=concurrency,
             )
 
-        # Create and configure the debugging controller.
-        self.debug = DebugControl(self.config.debug, debug_file or sys.stderr)
+        self._debug_file = None
+        self._auto_data = auto_data
+        self._data_suffix = data_suffix
+
+        # The matchers for _should_trace.
+        self.source_match = None
+        self.source_pkgs_match = None
+        self.pylib_match = self.cover_match = None
+        self.include_match = self.omit_match = None
+
+        # Is it ok for no data to be collected?
+        self._warn_no_data = True
+        self._warn_unimported_source = True
+
+        # A record of all the warnings that have been issued.
+        self._warnings = []
+
+        # Other instance attributes, set later.
+        self.omit = self.include = self.source = None
+        self.source_pkgs = None
+        self.data = self.data_files = self.collector = None
+        self.plugins = None
+        self.pylib_dirs = self.cover_dirs = None
+        self.data_suffix = self.run_suffix = None
+        self._exclude_re = None
+        self.debug = None
 
-        self.auto_data = auto_data
+        # State machine variables:
+        # Have we initialized everything?
+        self._inited = False
+        # Have we started collecting and not stopped it?
+        self._started = False
+        # Have we measured some data and not harvested it?
+        self._measured = False
+
+    def _init(self):
+        """Set all the initial state.
+
+        This is called by the public methods to initialize state. This lets us
+        construct a :class:`Coverage` object, then tweak its state before this
+        function is called.
 
-        # _exclude_re is a dict mapping exclusion list names to compiled
+        """
+        if self._inited:
+            return
+
+        # Create and configure the debugging controller. COVERAGE_DEBUG_FILE
+        # is an environment variable, the name of a file to append debug logs
+        # to.
+        if self._debug_file is None:
+            debug_file_name = os.environ.get("COVERAGE_DEBUG_FILE")
+            if debug_file_name:
+                self._debug_file = open(debug_file_name, "a")
+            else:
+                self._debug_file = sys.stderr
+        self.debug = DebugControl(self.config.debug, self._debug_file)
+
+        # Load plugins
+        self.plugins = Plugins.load_plugins(self.config.plugins, self.config, self.debug)
+
+        # _exclude_re is a dict that maps exclusion list names to compiled
         # regexes.
         self._exclude_re = {}
         self._exclude_regex_stale()
 
-        self.file_locator = FileLocator()
+        files.set_relative_directory()
 
         # The source argument can be directories or package names.
         self.source = []
         self.source_pkgs = []
         for src in self.config.source or []:
             if os.path.exists(src):
-                self.source.append(self.file_locator.canonical_filename(src))
+                self.source.append(files.canonical_filename(src))
             else:
                 self.source_pkgs.append(src)
 
         self.omit = prep_patterns(self.config.omit)
         self.include = prep_patterns(self.config.include)
 
+        concurrency = self.config.concurrency
+        if concurrency == "multiprocessing":
+            patch_multiprocessing()
+            concurrency = None
+
         self.collector = Collector(
-            self._should_trace, timid=self.config.timid,
-            branch=self.config.branch, warn=self._warn
+            should_trace=self._should_trace,
+            check_include=self._check_include_omit_etc,
+            timid=self.config.timid,
+            branch=self.config.branch,
+            warn=self._warn,
+            concurrency=concurrency,
             )
 
+        # Early warning if we aren't going to be able to support plugins.
+        if self.plugins.file_tracers and not self.collector.supports_plugins:
+            self._warn(
+                "Plugin file tracers (%s) aren't supported with %s" % (
+                    ", ".join(
+                        plugin._coverage_plugin_name
+                            for plugin in self.plugins.file_tracers
+                        ),
+                    self.collector.tracer_name(),
+                    )
+                )
+            for plugin in self.plugins.file_tracers:
+                plugin._coverage_enabled = False
+
         # Suffixes are a bit tricky.  We want to use the data suffix only when
         # collecting data, not when combining data.  So we save it as
         # `self.run_suffix` now, and promote it to `self.data_suffix` if we
         # find that we are collecting data later.
-        if data_suffix or self.config.parallel:
-            if not isinstance(data_suffix, string_class):
+        if self._data_suffix or self.config.parallel:
+            if not isinstance(self._data_suffix, string_class):
                 # if data_suffix=True, use .machinename.pid.random
-                data_suffix = True
+                self._data_suffix = True
         else:
-            data_suffix = None
+            self._data_suffix = None
         self.data_suffix = None
-        self.run_suffix = data_suffix
+        self.run_suffix = self._data_suffix
 
         # Create the data file.  We do this at construction time so that the
         # data file will be written into the directory where the process
         # started rather than wherever the process eventually chdir'd to.
-        self.data = CoverageData(
-            basename=self.config.data_file,
-            collector="coverage v%s" % __version__,
-            debug=self.debug,
-            )
+        self.data = CoverageData(debug=self.debug)
+        self.data_files = CoverageDataFiles(basename=self.config.data_file)
 
-        # The dirs for files considered "installed with the interpreter".
-        self.pylib_dirs = []
+        # The directories for files considered "installed with the interpreter".
+        self.pylib_dirs = set()
         if not self.config.cover_pylib:
             # Look at where some standard modules are located. That's the
             # indication for "installed with the interpreter". In some
             # environments (virtualenv, for example), these modules may be
             # spread across a few locations. Look at all the candidate modules
             # we've imported, and take all the different ones.
-            for m in (atexit, os, random, socket, _structseq):
+            for m in (atexit, inspect, os, platform, _structseq, traceback):
                 if m is not None and hasattr(m, "__file__"):
-                    m_dir = self._canonical_dir(m)
-                    if m_dir not in self.pylib_dirs:
-                        self.pylib_dirs.append(m_dir)
+                    self.pylib_dirs.add(self._canonical_dir(m))
+            if _structseq and not hasattr(_structseq, '__file__'):
+                # PyPy 2.4 has no __file__ in the builtin modules, but the code
+                # objects still have the file names.  So dig into one to find
+                # the path to exclude.
+                structseq_new = _structseq.structseq_new
+                try:
+                    structseq_file = structseq_new.func_code.co_filename
+                except AttributeError:
+                    structseq_file = structseq_new.__code__.co_filename
+                self.pylib_dirs.add(self._canonical_dir(structseq_file))
 
-        # To avoid tracing the coverage code itself, we skip anything located
-        # where we are.
-        self.cover_dir = self._canonical_dir(__file__)
-
-        # The matchers for _should_trace.
-        self.source_match = None
-        self.pylib_match = self.cover_match = None
-        self.include_match = self.omit_match = None
+        # To avoid tracing the coverage.py code itself, we skip anything
+        # located where we are.
+        self.cover_dirs = [self._canonical_dir(__file__)]
+        if env.TESTING:
+            # When testing, we use PyContracts, which should be considered
+            # part of coverage.py, and it uses six. Exclude those directories
+            # just as we exclude ourselves.
+            import contracts, six
+            for mod in [contracts, six]:
+                self.cover_dirs.append(self._canonical_dir(mod))
 
         # Set the reporting precision.
         Numbers.set_precision(self.config.precision)
 
-        # Is it ok for no data to be collected?
-        self._warn_no_data = True
-        self._warn_unimported_source = True
-
-        # State machine variables:
-        # Have we started collecting and not stopped it?
-        self._started = False
-        # Have we measured some data and not harvested it?
-        self._measured = False
-
         atexit.register(self._atexit)
 
-    def _canonical_dir(self, morf):
-        """Return the canonical directory of the module or file `morf`."""
-        return os.path.split(CodeUnit(morf, self.file_locator).filename)[0]
-
-    def _source_for_file(self, filename):
-        """Return the source file for `filename`."""
-        if not filename.endswith(".py"):
-            if filename.endswith((".pyc", ".pyo")):
-                filename = filename[:-1]
-            elif filename.endswith("$py.class"): # jython
-                filename = filename[:-9] + ".py"
-        return filename
-
-    def _should_trace_with_reason(self, filename, frame):
-        """Decide whether to trace execution in `filename`, with a reason.
-
-        This function is called from the trace function.  As each new file name
-        is encountered, this function determines whether it is traced or not.
-
-        Returns a pair of values:  the first indicates whether the file should
-        be traced: it's a canonicalized filename if it should be traced, None
-        if it should not.  The second value is a string, the resason for the
-        decision.
-
-        """
-        if not filename:
-            # Empty string is pretty useless
-            return None, "empty string isn't a filename"
-
-        if filename.startswith('<'):
-            # Lots of non-file execution is represented with artificial
-            # filenames like "<string>", "<doctest readme.txt[0]>", or
-            # "<exec_function>".  Don't ever trace these executions, since we
-            # can't do anything with the data later anyway.
-            return None, "not a real filename"
-
-        self._check_for_packages()
-
-        # Compiled Python files have two filenames: frame.f_code.co_filename is
-        # the filename at the time the .pyc was compiled.  The second name is
-        # __file__, which is where the .pyc was actually loaded from.  Since
-        # .pyc files can be moved after compilation (for example, by being
-        # installed), we look for __file__ in the frame and prefer it to the
-        # co_filename value.
-        dunder_file = frame.f_globals.get('__file__')
-        if dunder_file:
-            filename = self._source_for_file(dunder_file)
-
-        # Jython reports the .class file to the tracer, use the source file.
-        if filename.endswith("$py.class"):
-            filename = filename[:-9] + ".py"
-
-        canonical = self.file_locator.canonical_filename(filename)
-
-        # If the user specified source or include, then that's authoritative
-        # about the outer bound of what to measure and we don't have to apply
-        # any canned exclusions. If they didn't, then we have to exclude the
-        # stdlib and coverage.py directories.
-        if self.source_match:
-            if not self.source_match.match(canonical):
-                return None, "falls outside the --source trees"
-        elif self.include_match:
-            if not self.include_match.match(canonical):
-                return None, "falls outside the --include trees"
-        else:
-            # If we aren't supposed to trace installed code, then check if this
-            # is near the Python standard library and skip it if so.
-            if self.pylib_match and self.pylib_match.match(canonical):
-                return None, "is in the stdlib"
-
-            # We exclude the coverage code itself, since a little of it will be
-            # measured otherwise.
-            if self.cover_match and self.cover_match.match(canonical):
-                return None, "is part of coverage.py"
-
-        # Check the file against the omit pattern.
-        if self.omit_match and self.omit_match.match(canonical):
-            return None, "is inside an --omit pattern"
-
-        return canonical, "because we love you"
-
-    def _should_trace(self, filename, frame):
-        """Decide whether to trace execution in `filename`.
-
-        Calls `_should_trace_with_reason`, and returns just the decision.
-
-        """
-        canonical, reason = self._should_trace_with_reason(filename, frame)
-        if self.debug.should('trace'):
-            if not canonical:
-                msg = "Not tracing %r: %s" % (filename, reason)
-            else:
-                msg = "Tracing %r" % (filename,)
-            self.debug.write(msg)
-        return canonical
-
-    def _warn(self, msg):
-        """Use `msg` as a warning."""
-        self._warnings.append(msg)
-        sys.stderr.write("Coverage.py warning: %s\n" % msg)
-
-    def _check_for_packages(self):
-        """Update the source_match matcher with latest imported packages."""
-        # Our self.source_pkgs attribute is a list of package names we want to
-        # measure.  Each time through here, we see if we've imported any of
-        # them yet.  If so, we add its file to source_match, and we don't have
-        # to look for that package any more.
-        if self.source_pkgs:
-            found = []
-            for pkg in self.source_pkgs:
-                try:
-                    mod = sys.modules[pkg]
-                except KeyError:
-                    continue
-
-                found.append(pkg)
-
-                try:
-                    pkg_file = mod.__file__
-                except AttributeError:
-                    pkg_file = None
-                else:
-                    d, f = os.path.split(pkg_file)
-                    if f.startswith('__init__'):
-                        # This is actually a package, return the directory.
-                        pkg_file = d
-                    else:
-                        pkg_file = self._source_for_file(pkg_file)
-                    pkg_file = self.file_locator.canonical_filename(pkg_file)
-                    if not os.path.exists(pkg_file):
-                        pkg_file = None
-
-                if pkg_file:
-                    self.source.append(pkg_file)
-                    self.source_match.add(pkg_file)
-                else:
-                    self._warn("Module %s has no Python source." % pkg)
-
-            for pkg in found:
-                self.source_pkgs.remove(pkg)
-
-    def use_cache(self, usecache):
-        """Control the use of a data file (incorrectly called a cache).
-
-        `usecache` is true or false, whether to read and write data on disk.
-
-        """
-        self.data.usefile(usecache)
-
-    def load(self):
-        """Load previously-collected coverage data from the data file."""
-        self.collector.reset()
-        self.data.read()
-
-    def start(self):
-        """Start measuring code coverage.
-
-        Coverage measurement actually occurs in functions called after `start`
-        is invoked.  Statements in the same scope as `start` won't be measured.
-
-        Once you invoke `start`, you must also call `stop` eventually, or your
-        process might not shut down cleanly.
-
-        """
-        if self.run_suffix:
-            # Calling start() means we're running code, so use the run_suffix
-            # as the data_suffix when we eventually save the data.
-            self.data_suffix = self.run_suffix
-        if self.auto_data:
-            self.load()
+        self._inited = True
 
         # Create the matchers we need for _should_trace
         if self.source or self.source_pkgs:
             self.source_match = TreeMatcher(self.source)
+            self.source_pkgs_match = ModuleMatcher(self.source_pkgs)
         else:
-            if self.cover_dir:
-                self.cover_match = TreeMatcher([self.cover_dir])
+            if self.cover_dirs:
+                self.cover_match = TreeMatcher(self.cover_dirs)
             if self.pylib_dirs:
                 self.pylib_match = TreeMatcher(self.pylib_dirs)
         if self.include:
@@ -391,14 +336,344 @@
             self.omit_match = FnmatchMatcher(self.omit)
 
         # The user may want to debug things, show info if desired.
+        wrote_any = False
         if self.debug.should('config'):
-            self.debug.write("Configuration values:")
             config_info = sorted(self.config.__dict__.items())
-            self.debug.write_formatted_info(config_info)
+            self.debug.write_formatted_info("config", config_info)
+            wrote_any = True
 
         if self.debug.should('sys'):
-            self.debug.write("Debugging info:")
-            self.debug.write_formatted_info(self.sysinfo())
+            self.debug.write_formatted_info("sys", self.sys_info())
+            for plugin in self.plugins:
+                header = "sys: " + plugin._coverage_plugin_name
+                info = plugin.sys_info()
+                self.debug.write_formatted_info(header, info)
+            wrote_any = True
+
+        if wrote_any:
+            self.debug.write_formatted_info("end", ())
+
+    def _canonical_dir(self, morf):
+        """Return the canonical directory of the module or file `morf`."""
+        morf_filename = PythonFileReporter(morf, self).filename
+        return os.path.split(morf_filename)[0]
+
+    def _source_for_file(self, filename):
+        """Return the source file for `filename`.
+
+        Given a file name being traced, return the best guess as to the source
+        file to attribute it to.
+
+        """
+        if filename.endswith(".py"):
+            # .py files are themselves source files.
+            return filename
+
+        elif filename.endswith((".pyc", ".pyo")):
+            # Bytecode files probably have source files near them.
+            py_filename = filename[:-1]
+            if os.path.exists(py_filename):
+                # Found a .py file, use that.
+                return py_filename
+            if env.WINDOWS:
+                # On Windows, it could be a .pyw file.
+                pyw_filename = py_filename + "w"
+                if os.path.exists(pyw_filename):
+                    return pyw_filename
+            # Didn't find source, but it's probably the .py file we want.
+            return py_filename
+
+        elif filename.endswith("$py.class"):
+            # Jython is easy to guess.
+            return filename[:-9] + ".py"
+
+        # No idea, just use the file name as-is.
+        return filename
+
+    def _name_for_module(self, module_globals, filename):
+        """Get the name of the module for a set of globals and file name.
+
+        For configurability's sake, we allow __main__ modules to be matched by
+        their importable name.
+
+        If loaded via runpy (aka -m), we can usually recover the "original"
+        full dotted module name; otherwise, we resort to interpreting the
+        file name to get the module's name.  If the module name can't be
+        determined, None is returned.
+
+        """
+        dunder_name = module_globals.get('__name__', None)
+
+        if isinstance(dunder_name, str) and dunder_name != '__main__':
+            # This is the usual case: an imported module.
+            return dunder_name
+
+        loader = module_globals.get('__loader__', None)
+        for attrname in ('fullname', 'name'):   # attribute renamed in py3.2
+            if hasattr(loader, attrname):
+                fullname = getattr(loader, attrname)
+            else:
+                continue
+
+            if isinstance(fullname, str) and fullname != '__main__':
+                # Module loaded via: runpy -m
+                return fullname
+
+        # Script as first argument to Python command line.
+        inspectedname = inspect.getmodulename(filename)
+        if inspectedname is not None:
+            return inspectedname
+        else:
+            return dunder_name
+
+    def _should_trace_internal(self, filename, frame):
+        """Decide whether to trace execution in `filename`, with a reason.
+
+        This function is called from the trace function.  As each new file name
+        is encountered, this function determines whether it is traced or not.
+
+        Returns a FileDisposition object.
+
+        """
+        original_filename = filename
+        disp = _disposition_init(self.collector.file_disposition_class, filename)
+
+        def nope(disp, reason):
+            """Simple helper to make it easy to return NO."""
+            disp.trace = False
+            disp.reason = reason
+            return disp
+
+        # Compiled Python files have two file names: frame.f_code.co_filename is
+        # the file name at the time the .pyc was compiled.  The second name is
+        # __file__, which is where the .pyc was actually loaded from.  Since
+        # .pyc files can be moved after compilation (for example, by being
+        # installed), we look for __file__ in the frame and prefer it to the
+        # co_filename value.
+        dunder_file = frame.f_globals.get('__file__')
+        if dunder_file:
+            filename = self._source_for_file(dunder_file)
+            if original_filename and not original_filename.startswith('<'):
+                orig = os.path.basename(original_filename)
+                if orig != os.path.basename(filename):
+                    # Files shouldn't be renamed when moved. This happens when
+                    # exec'ing code.  If it seems like something is wrong with
+                    # the frame's file name, then just use the original.
+                    filename = original_filename
+
+        if not filename:
+            # Empty string is pretty useless.
+            return nope(disp, "empty string isn't a file name")
+
+        if filename.startswith('memory:'):
+            return nope(disp, "memory isn't traceable")
+
+        if filename.startswith('<'):
+            # Lots of non-file execution is represented with artificial
+            # file names like "<string>", "<doctest readme.txt[0]>", or
+            # "<exec_function>".  Don't ever trace these executions, since we
+            # can't do anything with the data later anyway.
+            return nope(disp, "not a real file name")
+
+        # Jython reports the .class file to the tracer, use the source file.
+        if filename.endswith("$py.class"):
+            filename = filename[:-9] + ".py"
+
+        canonical = files.canonical_filename(filename)
+        disp.canonical_filename = canonical
+
+        # Try the plugins, see if they have an opinion about the file.
+        plugin = None
+        for plugin in self.plugins.file_tracers:
+            if not plugin._coverage_enabled:
+                continue
+
+            try:
+                file_tracer = plugin.file_tracer(canonical)
+                if file_tracer is not None:
+                    file_tracer._coverage_plugin = plugin
+                    disp.trace = True
+                    disp.file_tracer = file_tracer
+                    if file_tracer.has_dynamic_source_filename():
+                        disp.has_dynamic_filename = True
+                    else:
+                        disp.source_filename = files.canonical_filename(
+                            file_tracer.source_filename()
+                        )
+                    break
+            except Exception:
+                self._warn(
+                    "Disabling plugin %r due to an exception:" % (
+                        plugin._coverage_plugin_name
+                    )
+                )
+                traceback.print_exc()
+                plugin._coverage_enabled = False
+                continue
+        else:
+            # No plugin wanted it: it's Python.
+            disp.trace = True
+            disp.source_filename = canonical
+
+        if not disp.has_dynamic_filename:
+            if not disp.source_filename:
+                raise CoverageException(
+                    "Plugin %r didn't set source_filename for %r" %
+                    (plugin, disp.original_filename)
+                )
+            reason = self._check_include_omit_etc_internal(
+                disp.source_filename, frame,
+            )
+            if reason:
+                nope(disp, reason)
+
+        return disp
+
+    def _check_include_omit_etc_internal(self, filename, frame):
+        """Check a file name against the include, omit, etc, rules.
+
+        Returns a string or None.  String means, don't trace, and is the reason
+        why.  None means no reason found to not trace.
+
+        """
+        modulename = self._name_for_module(frame.f_globals, filename)
+
+        # If the user specified source or include, then that's authoritative
+        # about the outer bound of what to measure and we don't have to apply
+        # any canned exclusions. If they didn't, then we have to exclude the
+        # stdlib and coverage.py directories.
+        if self.source_match:
+            if self.source_pkgs_match.match(modulename):
+                if modulename in self.source_pkgs:
+                    self.source_pkgs.remove(modulename)
+                return None  # There's no reason to skip this file.
+
+            if not self.source_match.match(filename):
+                return "falls outside the --source trees"
+        elif self.include_match:
+            if not self.include_match.match(filename):
+                return "falls outside the --include trees"
+        else:
+            # If we aren't supposed to trace installed code, then check if this
+            # is near the Python standard library and skip it if so.
+            if self.pylib_match and self.pylib_match.match(filename):
+                return "is in the stdlib"
+
+            # We exclude the coverage.py code itself, since a little of it
+            # will be measured otherwise.
+            if self.cover_match and self.cover_match.match(filename):
+                return "is part of coverage.py"
+
+        # Check the file against the omit pattern.
+        if self.omit_match and self.omit_match.match(filename):
+            return "is inside an --omit pattern"
+
+        # No reason found to skip this file.
+        return None
+
+    def _should_trace(self, filename, frame):
+        """Decide whether to trace execution in `filename`.
+
+        Calls `_should_trace_internal`, and returns the FileDisposition.
+
+        """
+        disp = self._should_trace_internal(filename, frame)
+        if self.debug.should('trace'):
+            self.debug.write(_disposition_debug_msg(disp))
+        return disp
+
+    def _check_include_omit_etc(self, filename, frame):
+        """Check a file name against the include/omit/etc, rules, verbosely.
+
+        Returns a boolean: True if the file should be traced, False if not.
+
+        """
+        reason = self._check_include_omit_etc_internal(filename, frame)
+        if self.debug.should('trace'):
+            if not reason:
+                msg = "Including %r" % (filename,)
+            else:
+                msg = "Not including %r: %s" % (filename, reason)
+            self.debug.write(msg)
+
+        return not reason
+
+    def _warn(self, msg):
+        """Use `msg` as a warning."""
+        self._warnings.append(msg)
+        if self.debug.should('pid'):
+            msg = "[%d] %s" % (os.getpid(), msg)
+        sys.stderr.write("Coverage.py warning: %s\n" % msg)
+
+    def get_option(self, option_name):
+        """Get an option from the configuration.
+
+        `option_name` is a colon-separated string indicating the section and
+        option name.  For example, the ``branch`` option in the ``[run]``
+        section of the config file would be indicated with `"run:branch"`.
+
+        Returns the value of the option.
+
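+        As an example, the counterpart of the :meth:`set_option` example
+        below would be::
+
+            branch = cov.get_option("run:branch")
+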
+        .. versionadded:: 4.0
+
+        """
+        return self.config.get_option(option_name)
+
+    def set_option(self, option_name, value):
+        """Set an option in the configuration.
+
+        `option_name` is a colon-separated string indicating the section and
+        option name.  For example, the ``branch`` option in the ``[run]``
+        section of the config file would be indicated with ``"run:branch"``.
+
+        `value` is the new value for the option.  This should be a Python
+        value where appropriate.  For example, use True for booleans, not the
+        string ``"True"``.
+
+        As an example, calling::
+
+            cov.set_option("run:branch", True)
+
+        has the same effect as this configuration file::
+
+            [run]
+            branch = True
+
+        .. versionadded:: 4.0
+
+        """
+        self.config.set_option(option_name, value)
+
+    def use_cache(self, usecache):
+        """Obsolete method."""
+        self._init()
+        if not usecache:
+            self._warn("use_cache(False) is no longer supported.")
+
+    def load(self):
+        """Load previously-collected coverage data from the data file."""
+        self._init()
+        self.collector.reset()
+        self.data_files.read(self.data)
+
+    def start(self):
+        """Start measuring code coverage.
+
+        Coverage measurement actually occurs in functions called after
+        :meth:`start` is invoked.  Statements in the same scope as
+        :meth:`start` won't be measured.
+
+        Once you invoke :meth:`start`, you must also call :meth:`stop`
+        eventually, or your process might not shut down cleanly.
+
+        """
+        self._init()
+        if self.run_suffix:
+            # Calling start() means we're running code, so use the run_suffix
+            # as the data_suffix when we eventually save the data.
+            self.data_suffix = self.run_suffix
+        if self._auto_data:
+            self.load()
 
         self.collector.start()
         self._started = True
@@ -406,14 +681,15 @@
 
     def stop(self):
         """Stop measuring code coverage."""
+        if self._started:
+            self.collector.stop()
         self._started = False
-        self.collector.stop()
 
     def _atexit(self):
         """Clean up on process shutdown."""
         if self._started:
             self.stop()
-        if self.auto_data:
+        if self._auto_data:
             self.save()
 
     def erase(self):
@@ -423,11 +699,14 @@
         discarding the data file.
 
         """
+        self._init()
         self.collector.reset()
         self.data.erase()
+        self.data_files.erase(parallel=self.config.parallel)
 
     def clear_exclude(self, which='exclude'):
         """Clear the exclude list."""
+        self._init()
         setattr(self.config, which + "_list", [])
         self._exclude_regex_stale()
 
@@ -446,6 +725,7 @@
         is marked for special treatment during reporting.
 
         """
+        self._init()
         excl_list = getattr(self.config, which + "_list")
         excl_list.append(regex)
         self._exclude_regex_stale()
@@ -464,79 +744,86 @@
     def get_exclude_list(self, which='exclude'):
         """Return a list of excluded regex patterns.
 
-        `which` indicates which list is desired.  See `exclude` for the lists
-        that are available, and their meaning.
+        `which` indicates which list is desired.  See :meth:`exclude` for the
+        lists that are available, and their meaning.
 
         """
+        self._init()
         return getattr(self.config, which + "_list")
 
     def save(self):
         """Save the collected coverage data to the data file."""
-        data_suffix = self.data_suffix
-        if data_suffix is True:
-            # If data_suffix was a simple true value, then make a suffix with
-            # plenty of distinguishing information.  We do this here in
-            # `save()` at the last minute so that the pid will be correct even
-            # if the process forks.
-            extra = ""
-            if _TEST_NAME_FILE:
-                f = open(_TEST_NAME_FILE)
-                test_name = f.read()
-                f.close()
-                extra = "." + test_name
-            data_suffix = "%s%s.%s.%06d" % (
-                socket.gethostname(), extra, os.getpid(),
-                random.randint(0, 999999)
-                )
+        self._init()
+        self.get_data()
+        self.data_files.write(self.data, suffix=self.data_suffix)
 
-        self._harvest_data()
-        self.data.write(suffix=data_suffix)
-
-    def combine(self):
+    def combine(self, data_paths=None):
         """Combine together a number of similarly-named coverage data files.
 
         All coverage data files whose name starts with `data_file` (from the
         coverage() constructor) will be read, and combined together into the
         current measurements.
 
+        `data_paths` is a list of files or directories from which data should
+        be combined. If no list is passed, then the data files from the
+        directory indicated by the current data file (probably the current
+        directory) will be combined.
+
+        .. versionadded:: 4.0
+            The `data_paths` parameter.
+
         """
+        self._init()
+        self.get_data()
+
         aliases = None
         if self.config.paths:
-            aliases = PathAliases(self.file_locator)
+            aliases = PathAliases()
             for paths in self.config.paths.values():
                 result = paths[0]
                 for pattern in paths[1:]:
                     aliases.add(pattern, result)
-        self.data.combine_parallel_data(aliases=aliases)
 
-    def _harvest_data(self):
+        self.data_files.combine_parallel_data(self.data, aliases=aliases, data_paths=data_paths)
+
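+    # A minimal combining sketch, assuming parallel runs have left data files
+    # such as ".coverage.<host>.<pid>.<random>" next to the main data file:
+    #
+    #     cov = Coverage()
+    #     cov.combine()
+    #     cov.save()
+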
+    def get_data(self):
         """Get the collected data and reset the collector.
 
         Also warn about various problems collecting data.
 
+        Returns a :class:`coverage.CoverageData`, the collected coverage data.
+
+        .. versionadded:: 4.0
+
         """
+        self._init()
         if not self._measured:
             return
 
-        self.data.add_line_data(self.collector.get_line_data())
-        self.data.add_arc_data(self.collector.get_arc_data())
-        self.collector.reset()
+        self.collector.save_data(self.data)
 
         # If there are still entries in the source_pkgs list, then we never
         # encountered those packages.
         if self._warn_unimported_source:
             for pkg in self.source_pkgs:
-                self._warn("Module %s was never imported." % pkg)
+                if pkg not in sys.modules:
+                    self._warn("Module %s was never imported." % pkg)
+                elif not (
+                    hasattr(sys.modules[pkg], '__file__') and
+                    os.path.exists(sys.modules[pkg].__file__)
+                ):
+                    self._warn("Module %s has no Python source." % pkg)
+                else:
+                    self._warn("Module %s was previously imported, but not measured." % pkg)
 
         # Find out if we got any data.
-        summary = self.data.summary()
-        if not summary and self._warn_no_data:
+        if not self.data and self._warn_no_data:
             self._warn("No data was collected.")
 
         # Find files that were never executed at all.
         for src in self.source:
             for py_file in find_python_files(src):
-                py_file = self.file_locator.canonical_filename(py_file)
+                py_file = files.canonical_filename(py_file)
 
                 if self.omit_match and self.omit_match.match(py_file):
                     # Turns out this file was omitted, so don't pull it back
@@ -545,7 +832,20 @@
 
                 self.data.touch_file(py_file)
 
+        # Add run information.
+        self.data.add_run_info(
+            brief_sys=" ".join([
+                platform.python_implementation(),
+                platform.python_version(),
+                platform.system(),
+            ])
+        )
+
+        if self.config.note:
+            self.data.add_run_info(note=self.config.note)
+
         self._measured = False
+        return self.data
 
     # Backward compatibility with version 1.
     def analysis(self, morf):
@@ -556,10 +856,10 @@
     def analysis2(self, morf):
         """Analyze a module.
 
-        `morf` is a module or a filename.  It will be analyzed to determine
+        `morf` is a module or a file name.  It will be analyzed to determine
         its coverage statistics.  The return value is a 5-tuple:
 
-        * The filename for the module.
+        * The file name for the module.
         * A list of line numbers of executable statements.
         * A list of line numbers of excluded statements.
         * A list of line numbers of statements not run (missing from
@@ -570,6 +870,7 @@
         coverage data.
 
         """
+        self._init()
         analysis = self._analyze(morf)
         return (
             analysis.filename,
@@ -585,38 +886,91 @@
         Returns an `Analysis` object.
 
         """
-        self._harvest_data()
-        if not isinstance(it, CodeUnit):
-            it = code_unit_factory(it, self.file_locator)[0]
+        self.get_data()
+        if not isinstance(it, FileReporter):
+            it = self._get_file_reporter(it)
+
+        return Analysis(self.data, it)
+
+    def _get_file_reporter(self, morf):
+        """Get a FileReporter for a module or file name."""
+        plugin = None
+        file_reporter = "python"
+
+        if isinstance(morf, string_class):
+            abs_morf = abs_file(morf)
+            plugin_name = self.data.file_tracer(abs_morf)
+            if plugin_name:
+                plugin = self.plugins.get(plugin_name)
+
+        if plugin:
+            file_reporter = plugin.file_reporter(abs_morf)
+            if file_reporter is None:
+                raise CoverageException(
+                    "Plugin %r did not provide a file reporter for %r." % (
+                        plugin._coverage_plugin_name, morf
+                    )
+                )
+
+        if file_reporter == "python":
+            file_reporter = PythonFileReporter(morf, self)
+
+        return file_reporter
 
-        return Analysis(self, it)
+    def _get_file_reporters(self, morfs=None):
+        """Get a list of FileReporters for a list of modules or file names.
+
+        For each module or file name in `morfs`, find a FileReporter.  Return
+        the list of FileReporters.
+
+        If `morfs` is a single module or file name, this returns a list of one
+        FileReporter.  If `morfs` is empty or None, then the list of all files
+        measured is used to find the FileReporters.
+
+        """
+        if not morfs:
+            morfs = self.data.measured_files()
 
-    def report(self, morfs=None, show_missing=True, ignore_errors=None,
-                file=None,                          # pylint: disable=W0622
-                omit=None, include=None
-                ):
+        # Be sure we have a list.
+        if not isinstance(morfs, (list, tuple)):
+            morfs = [morfs]
+
+        file_reporters = []
+        for morf in morfs:
+            file_reporter = self._get_file_reporter(morf)
+            file_reporters.append(file_reporter)
+
+        return file_reporters
+
+    def report(
+        self, morfs=None, show_missing=True, ignore_errors=None,
+        file=None,                  # pylint: disable=redefined-builtin
+        omit=None, include=None, skip_covered=False,
+    ):
         """Write a summary report to `file`.
 
         Each module in `morfs` is listed, with counts of statements, executed
         statements, missing statements, and a list of lines missed.
 
-        `include` is a list of filename patterns.  Modules whose filenames
-        match those patterns will be included in the report. Modules matching
-        `omit` will not be included in the report.
+        `include` is a list of file name patterns.  Files that match will be
+        included in the report. Files matching `omit` will not be included in
+        the report.
 
         Returns a float, the total percentage covered.
 
         """
-        self._harvest_data()
+        self.get_data()
         self.config.from_args(
             ignore_errors=ignore_errors, omit=omit, include=include,
-            show_missing=show_missing,
+            show_missing=show_missing, skip_covered=skip_covered,
             )
         reporter = SummaryReporter(self, self.config)
         return reporter.report(morfs, outfile=file)
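+
+    # A minimal reporting sketch, assuming measurement has already been
+    # collected; the file name patterns here are purely illustrative:
+    #
+    #     total = cov.report(include=["myproj/*"], omit=["*/tests/*"])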
 
-    def annotate(self, morfs=None, directory=None, ignore_errors=None,
-                    omit=None, include=None):
+    def annotate(
+        self, morfs=None, directory=None, ignore_errors=None,
+        omit=None, include=None,
+    ):
         """Annotate a list of modules.
 
         Each module in `morfs` is annotated.  The source is written to a new
@@ -624,10 +978,10 @@
         marker to indicate the coverage of the line.  Covered lines have ">",
         excluded lines have "-", and missing lines have "!".
 
-        See `coverage.report()` for other arguments.
+        See :meth:`report` for other arguments.
 
         """
-        self._harvest_data()
+        self.get_data()
         self.config.from_args(
             ignore_errors=ignore_errors, omit=omit, include=include
             )
@@ -648,12 +1002,12 @@
         `title` is a text string (not HTML) to use as the title of the HTML
         report.
 
-        See `coverage.report()` for other arguments.
+        See :meth:`report` for other arguments.
 
         Returns a float, the total percentage covered.
 
         """
-        self._harvest_data()
+        self.get_data()
         self.config.from_args(
             ignore_errors=ignore_errors, omit=omit, include=include,
             html_dir=directory, extra_css=extra_css, html_title=title,
@@ -661,8 +1015,10 @@
         reporter = HtmlReporter(self, self.config)
         return reporter.report(morfs)
 
-    def xml_report(self, morfs=None, outfile=None, ignore_errors=None,
-                    omit=None, include=None):
+    def xml_report(
+        self, morfs=None, outfile=None, ignore_errors=None,
+        omit=None, include=None,
+    ):
         """Generate an XML report of coverage results.
 
         The report is compatible with Cobertura reports.
@@ -670,12 +1026,12 @@
         Each module in `morfs` is included in the report.  `outfile` is the
         path to write the file to, "-" will write to stdout.
 
-        See `coverage.report()` for other arguments.
+        See :meth:`report` for other arguments.
 
         Returns a float, the total percentage covered.
 
         """
-        self._harvest_data()
+        self.get_data()
         self.config.from_args(
             ignore_errors=ignore_errors, omit=omit, include=include,
             xml_output=outfile,
@@ -686,69 +1042,112 @@
             if self.config.xml_output == '-':
                 outfile = sys.stdout
             else:
+                # Ensure that the output directory is created; done here
+                # because this report pre-opens the output file.
+                # HTMLReport does this using the Report plumbing because
+                # its task is more complex, being multiple files.
+                output_dir = os.path.dirname(self.config.xml_output)
+                if output_dir and not os.path.isdir(output_dir):
+                    os.makedirs(output_dir)
                 outfile = open(self.config.xml_output, "w")
                 file_to_close = outfile
         try:
-            try:
-                reporter = XmlReporter(self, self.config)
-                return reporter.report(morfs, outfile=outfile)
-            except CoverageException:
-                delete_file = True
-                raise
+            reporter = XmlReporter(self, self.config)
+            return reporter.report(morfs, outfile=outfile)
+        except CoverageException:
+            delete_file = True
+            raise
         finally:
             if file_to_close:
                 file_to_close.close()
                 if delete_file:
                     file_be_gone(self.config.xml_output)
 
-    def sysinfo(self):
+    def sys_info(self):
         """Return a list of (key, value) pairs showing internal information."""
 
         import coverage as covmod
-        import platform, re
+
+        self._init()
 
-        try:
-            implementation = platform.python_implementation()
-        except AttributeError:
-            implementation = "unknown"
+        ft_plugins = []
+        for ft in self.plugins.file_tracers:
+            ft_name = ft._coverage_plugin_name
+            if not ft._coverage_enabled:
+                ft_name += " (disabled)"
+            ft_plugins.append(ft_name)
 
         info = [
             ('version', covmod.__version__),
             ('coverage', covmod.__file__),
-            ('cover_dir', self.cover_dir),
+            ('cover_dirs', self.cover_dirs),
             ('pylib_dirs', self.pylib_dirs),
             ('tracer', self.collector.tracer_name()),
+            ('plugins.file_tracers', ft_plugins),
             ('config_files', self.config.attempted_config_files),
             ('configs_read', self.config.config_files),
-            ('data_path', self.data.filename),
+            ('data_path', self.data_files.filename),
             ('python', sys.version.replace('\n', '')),
             ('platform', platform.platform()),
-            ('implementation', implementation),
+            ('implementation', platform.python_implementation()),
             ('executable', sys.executable),
             ('cwd', os.getcwd()),
             ('path', sys.path),
-            ('environment', sorted([
-                ("%s = %s" % (k, v)) for k, v in iitems(os.environ)
-                    if re.search(r"^COV|^PY", k)
-                ])),
+            ('environment', sorted(
+                ("%s = %s" % (k, v))
+                for k, v in iitems(os.environ)
+                if k.startswith(("COV", "PY"))
+            )),
             ('command_line', " ".join(getattr(sys, 'argv', ['???']))),
             ]
-        if self.source_match:
-            info.append(('source_match', self.source_match.info()))
-        if self.include_match:
-            info.append(('include_match', self.include_match.info()))
-        if self.omit_match:
-            info.append(('omit_match', self.omit_match.info()))
-        if self.cover_match:
-            info.append(('cover_match', self.cover_match.info()))
-        if self.pylib_match:
-            info.append(('pylib_match', self.pylib_match.info()))
+
+        matcher_names = [
+            'source_match', 'source_pkgs_match',
+            'include_match', 'omit_match',
+            'cover_match', 'pylib_match',
+            ]
+
+        for matcher_name in matcher_names:
+            matcher = getattr(self, matcher_name)
+            if matcher:
+                matcher_info = matcher.info()
+            else:
+                matcher_info = '-none-'
+            info.append((matcher_name, matcher_info))
 
         return info
 
 
+# FileDisposition "methods": FileDisposition is a pure value object, so it can
+# be implemented in either C or Python.  Acting on them is done with these
+# functions.
+
+def _disposition_init(cls, original_filename):
+    """Construct and initialize a new FileDisposition object."""
+    disp = cls()
+    disp.original_filename = original_filename
+    disp.canonical_filename = original_filename
+    disp.source_filename = None
+    disp.trace = False
+    disp.reason = ""
+    disp.file_tracer = None
+    disp.has_dynamic_filename = False
+    return disp
+
+
+def _disposition_debug_msg(disp):
+    """Make a nice debug message of what the FileDisposition is doing."""
+    if disp.trace:
+        msg = "Tracing %r" % (disp.original_filename,)
+        if disp.file_tracer:
+            msg += ": will be traced by %r" % disp.file_tracer
+    else:
+        msg = "Not tracing %r: %s" % (disp.original_filename, disp.reason)
+    return msg
+
+
 def process_startup():
-    """Call this at Python startup to perhaps measure coverage.
+    """Call this at Python start-up to perhaps measure coverage.
 
     If the environment variable COVERAGE_PROCESS_START is defined, coverage
     measurement is started.  The value of the variable is the config file
@@ -768,15 +1167,30 @@
 
     """
     cps = os.environ.get("COVERAGE_PROCESS_START")
-    if cps:
-        cov = coverage(config_file=cps, auto_data=True)
-        cov.start()
-        cov._warn_no_data = False
-        cov._warn_unimported_source = False
+    if not cps:
+        # No request for coverage, nothing to do.
+        return
 
+    # This function can be called more than once in a process. This happens
+    # because some virtualenv configurations make the same directory visible
+    # twice in sys.path.  This means that the .pth file will be found twice,
+    # and executed twice, executing this function twice.  We set a global
+    # flag (an attribute on this function) to indicate that coverage.py has
+    # already been started, so we can avoid doing it twice.
+    #
+    # https://bitbucket.org/ned/coveragepy/issue/340/keyerror-subpy has more
+    # details.
 
-# A hack for debugging testing in subprocesses.
-_TEST_NAME_FILE = "" #"/tmp/covtest.txt"
+    if hasattr(process_startup, "done"):
+        # We've annotated this function before, so we must have already
+        # started coverage.py in this process.  Nothing to do.
+        return
+
+    process_startup.done = True
+    cov = Coverage(config_file=cps, auto_data=True)
+    cov.start()
+    cov._warn_no_data = False
+    cov._warn_unimported_source = False
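+
+# A sketch of how this hook is typically armed (the exact start-up file is an
+# environment choice): point COVERAGE_PROCESS_START at a config file, and make
+# sure this function runs at interpreter start-up, for example from a
+# sitecustomize.py or .pth file containing:
+#
+#     import coverage
+#     coverage.process_startup()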
 
 #
 # eflag: FileType = Python2
--- a/DebugClients/Python/coverage/data.py	Sat Oct 10 12:06:10 2015 +0200
+++ b/DebugClients/Python/coverage/data.py	Sat Oct 10 12:44:52 2015 +0200
@@ -1,82 +1,642 @@
-"""Coverage data for Coverage."""
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
+"""Coverage data for coverage.py."""
 
+import glob
+import json
+import optparse
 import os
+import os.path
+import random
+import re
+import socket
 import sys
 
-from .backward import iitems, pickle, sorted    # pylint: disable=W0622
-from .files import PathAliases
-from .misc import file_be_gone
+from coverage import env
+from coverage.backward import iitems, string_class
+from coverage.debug import _TEST_NAME_FILE
+from coverage.files import PathAliases
+from coverage.misc import CoverageException, file_be_gone
 
 
 class CoverageData(object):
     """Manages collected coverage data, including file storage.
 
-    The data file format is a pickled dict, with these keys:
+    This class is the public supported API to the data coverage.py collects
+    during program execution.  It includes information about what code was
+    executed. It does not include information from the analysis phase, to
+    determine what lines could have been executed, or what lines were not
+    executed.
+
+    .. note::
+
+        The file format is not documented or guaranteed.  It will change in
+        the future, in possibly complicated ways.  Do not read coverage.py
+        data files directly.  Use this API to avoid disruption.
+
+    There are a number of kinds of data that can be collected:
 
-        * collector: a string identifying the collecting software
+    * **lines**: the line numbers of source lines that were executed.
+      These are always available.
+
+    * **arcs**: pairs of source and destination line numbers for transitions
+      between source lines.  These are only available if branch coverage was
+      used.
+
+    * **file tracer names**: the module names of the file tracer plugins that
+      handled each file in the data.
+
+    * **run information**: information about the program execution.  This is
+      written during "coverage run", and then accumulated during "coverage
+      combine".
 
-        * lines: a dict mapping filenames to sorted lists of line numbers
-          executed:
-            { 'file1': [17,23,45],  'file2': [1,2,3], ... }
+    Lines, arcs, and file tracer names are stored for each source file. File
+    names in this API are case-sensitive, even on platforms with
+    case-insensitive file systems.
+
+    To read a coverage.py data file, use :meth:`read_file`, or
+    :meth:`read_fileobj` if you have an already-opened file.  You can then
+    access the line, arc, or file tracer data with :meth:`lines`, :meth:`arcs`,
+    or :meth:`file_tracer`.  Run information is available with
+    :meth:`run_infos`.
+
+    The :meth:`has_arcs` method indicates whether arc data is available.  You
+    can get a list of the files in the data with :meth:`measured_files`.
+    A summary of the line data is available from :meth:`line_counts`.  As with
+    most Python containers, you can determine if there is any data at all by
+    using this object as a boolean value.
 
-        * arcs: a dict mapping filenames to sorted lists of line number pairs:
-            { 'file1': [(17,23), (17,25), (25,26)], ... }
+
+    Most data files will be created by coverage.py itself, but you can use
+    methods here to create data files if you like.  The :meth:`add_lines`,
+    :meth:`add_arcs`, and :meth:`add_file_tracers` methods add data, in ways
+    that are convenient for coverage.py.  The :meth:`add_run_info` method adds
+    key-value pairs to the run information.
+
+    To add a file without any measured data, use :meth:`touch_file`.
+
+    You write to a named file with :meth:`write_file`, or to an already opened
+    file with :meth:`write_fileobj`.
+
+    You can clear the data in memory with :meth:`erase`.  Two data collections
+    can be combined by using :meth:`update` on one :class:`CoverageData`,
+    passing it the other.
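+
+    A short usage sketch, assuming a data file named ".coverage" has already
+    been written by a completed run::
+
+        data = CoverageData()
+        data.read_file(".coverage")
+        print(data.measured_files())
+        print(data.line_counts())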
 
     """
 
-    def __init__(self, basename=None, collector=None, debug=None):
-        """Create a CoverageData.
+    # The data file format is JSON, with these keys:
+    #
+    #     * lines: a dict mapping file names to lists of line numbers
+    #       executed::
+    #
+    #         { "file1": [17,23,45], "file2": [1,2,3], ... }
+    #
+    #     * arcs: a dict mapping file names to lists of line number pairs::
+    #
+    #         { "file1": [[17,23], [17,25], [25,26]], ... }
+    #
+    #     * file_tracers: a dict mapping file names to plugin names::
+    #
+    #         { "file1": "django.coverage", ... }
+    #
+    #     * runs: a list of dicts of information about the coverage.py runs
+    #       contributing to the data::
+    #
+    #         [ { "brief_sys": "CPython 2.7.10 Darwin" }, ... ]
+    #
+    # Only one of `lines` or `arcs` will be present: with branch coverage, data
+    # is stored as arcs. Without branch coverage, it is stored as lines.  The
+    # line data is easily recovered from the arcs: it is all the first elements
+    # of the pairs that are greater than zero.
 
-        `basename` is the name of the file to use for storing data.
-
-        `collector` is a string describing the coverage measurement software.
+    def __init__(self, debug=None):
+        """Create a CoverageData.
 
         `debug` is a `DebugControl` object for writing debug messages.
 
         """
-        self.collector = collector or 'unknown'
-        self.debug = debug
-
-        self.use_file = True
-
-        # Construct the filename that will be used for data file storage, if we
-        # ever do any file storage.
-        self.filename = basename or ".coverage"
-        self.filename = os.path.abspath(self.filename)
+        self._debug = debug
 
         # A map from canonical Python source file name to a dictionary in
         # which there's an entry for each line number that has been
         # executed:
         #
-        #   {
-        #       'filename1.py': { 12: None, 47: None, ... },
-        #       ...
-        #       }
+        #   { 'filename1.py': [12, 47, 1001], ... }
         #
-        self.lines = {}
+        self._lines = None
 
         # A map from canonical Python source file name to a dictionary with an
         # entry for each pair of line numbers forming an arc:
         #
-        #   {
-        #       'filename1.py': { (12,14): None, (47,48): None, ... },
-        #       ...
-        #       }
+        #   { 'filename1.py': [(12,14), (47,48), ... ], ... }
+        #
+        self._arcs = None
+
+        # A map from canonical source file name to a plugin module name:
+        #
+        #   { 'filename1.py': 'django.coverage', ... }
         #
-        self.arcs = {}
+        self._file_tracers = {}
+
+        # A list of dicts of information about the coverage.py runs.
+        self._runs = []
+
+    def __repr__(self):
+        return "<{klass} lines={lines} arcs={arcs} tracers={tracers} runs={runs}>".format(
+            klass=self.__class__.__name__,
+            lines="None" if self._lines is None else "{{{0}}}".format(len(self._lines)),
+            arcs="None" if self._arcs is None else "{{{0}}}".format(len(self._arcs)),
+            tracers="{{{0}}}".format(len(self._file_tracers)),
+            runs="[{0}]".format(len(self._runs)),
+        )
+
+    ##
+    ## Reading data
+    ##
+
+    def has_arcs(self):
+        """Does this data have arcs?
+
+        Arc data is only available if branch coverage was used during
+        collection.
+
+        Returns a boolean.
+
+        """
+        return self._has_arcs()
+
+    def lines(self, filename):
+        """Get the list of lines executed for a file.
+
+        If the file was not measured, returns None.  A file might be measured,
+        and have no lines executed, in which case an empty list is returned.
+
+        If the file was executed, returns a list of integers, the line numbers
+        executed in the file. The list is in no particular order.
+
+        """
+        if self._arcs is not None:
+            if filename in self._arcs:
+                return [s for s, __ in self._arcs[filename] if s > 0]
+        elif self._lines is not None:
+            if filename in self._lines:
+                return self._lines[filename]
+        return None
+
+    def arcs(self, filename):
+        """Get the list of arcs executed for a file.
+
+        If the file was not measured, returns None.  A file might be measured,
+        and have no arcs executed, in which case an empty list is returned.
+
+        If the file was executed, returns a list of 2-tuples of integers. Each
+        pair is a starting line number and an ending line number for a
+        transition from one line to another. The list is in no particular
+        order.
+
+        Negative numbers have special meaning.  If the starting line number is
+        -N, it represents an entry to the code object that starts at line N.
+        If the ending line number is -N, it's an exit from the code object that
+        starts at line N.
+
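+        For example, under this convention the arc ``(-1, 1)`` records an
+        entry into a code object starting at line 1, and ``(7, -1)`` records
+        an exit from that code object taken from line 7.
+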
+        """
+        if self._arcs is not None:
+            if filename in self._arcs:
+                return self._arcs[filename]
+        return None
+
+    def file_tracer(self, filename):
+        """Get the plugin name of the file tracer for a file.
+
+        Returns the name of the plugin that handles this file.  If the file was
+        measured, but didn't use a plugin, then "" is returned.  If the file
+        was not measured, then None is returned.
+
+        """
+        # Because the vast majority of files involve no plugin, we don't store
+        # them explicitly in self._file_tracers.  Check the measured data
+        # instead to see if it was a known file with no plugin.
+        if filename in (self._arcs or self._lines or {}):
+            return self._file_tracers.get(filename, "")
+        return None
+
+    def run_infos(self):
+        """Return the list of dicts of run information.
+
+        For data collected during a single run, this will be a one-element
+        list.  If data has been combined, there will be one element for each
+        original data file.
+
+        """
+        return self._runs
+
+    def measured_files(self):
+        """A list of all files that had been measured."""
+        return list(self._arcs or self._lines or {})
+
+    def line_counts(self, fullpath=False):
+        """Return a dict summarizing the line coverage data.
+
+        Keys are based on the file names, and values are the number of executed
+        lines.  If `fullpath` is true, then the keys are the full pathnames of
+        the files, otherwise they are the basenames of the files.
+
+        Returns a dict mapping file names to counts of lines.
+
+        """
+        summ = {}
+        if fullpath:
+            filename_fn = lambda f: f
+        else:
+            filename_fn = os.path.basename
+        for filename in self.measured_files():
+            summ[filename_fn(filename)] = len(self.lines(filename))
+        return summ
+
+    def __nonzero__(self):
+        return bool(self._lines or self._arcs)
+
+    __bool__ = __nonzero__
+
+    def read_fileobj(self, file_obj):
+        """Read the coverage data from the given file object.
+
+        Should only be used on an empty CoverageData object.
+
+        """
+        data = self._read_raw_data(file_obj)
+
+        self._lines = self._arcs = None
+
+        if 'lines' in data:
+            self._lines = dict(
+                (fname.encode(sys.getfilesystemencoding()), linenos)
+                for fname, linenos in iitems(data['lines'])
+            )
+
+        if 'arcs' in data:
+            self._arcs = dict(
+                (fname.encode(sys.getfilesystemencoding()),
+                    [tuple(pair) for pair in arcs])
+                for fname, arcs in iitems(data['arcs'])
+            )
+        self._file_tracers = data.get('file_tracers', {})
+        self._runs = data.get('runs', [])
+
+        self._validate()
+
+    def read_file(self, filename):
+        """Read the coverage data from `filename` into this object."""
+        if self._debug and self._debug.should('dataio'):
+            self._debug.write("Reading data from %r" % (filename,))
+        with self._open_for_reading(filename) as f:
+            self.read_fileobj(f)
+
+    _GO_AWAY = "!coverage.py: This is a private format, don't read it directly!"
+
+    @classmethod
+    def _open_for_reading(cls, filename):
+        """Open a file appropriately for reading data."""
+        return open(filename, "r")
+
+    @classmethod
+    def _read_raw_data(cls, file_obj):
+        """Read the raw data from a file object."""
+        go_away = file_obj.read(len(cls._GO_AWAY))
+        if go_away != cls._GO_AWAY:
+            raise CoverageException("Doesn't seem to be a coverage.py data file")
+        return json.load(file_obj)
+
+    @classmethod
+    def _read_raw_data_file(cls, filename):
+        """Read the raw data from a file, for debugging."""
+        with cls._open_for_reading(filename) as f:
+            return cls._read_raw_data(f)
+
+    ##
+    ## Writing data
+    ##
+
+    def add_lines(self, line_data):
+        """Add measured line data.
+
+        `line_data` is a dictionary mapping file names to dictionaries::
+
+            { filename: { lineno: None, ... }, ...}
+
+        """
+        if self._debug and self._debug.should('dataop'):
+            self._debug.write("Adding lines: %d files, %d lines total" % (
+                len(line_data), sum(len(lines) for lines in line_data.values())
+            ))
+        if self._has_arcs():
+            raise CoverageException("Can't add lines to existing arc data")
+
+        if self._lines is None:
+            self._lines = {}
+        for filename, linenos in iitems(line_data):
+            if filename in self._lines:
+                new_linenos = set(self._lines[filename])
+                new_linenos.update(linenos)
+                linenos = new_linenos
+            self._lines[filename] = list(linenos)
+
+        self._validate()
+
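+    # A minimal writing sketch, assuming the dictionary shape described above;
+    # the data file name is purely illustrative:
+    #
+    #     data = CoverageData()
+    #     data.add_lines({"prog.py": {1: None, 2: None, 17: None}})
+    #     data.write_file(".coverage.sketch")
+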
+    def add_arcs(self, arc_data):
+        """Add measured arc data.
+
+        `arc_data` is a dictionary mapping file names to dictionaries::
+
+            { filename: { (l1,l2): None, ... }, ...}
+
+        """
+        if self._debug and self._debug.should('dataop'):
+            self._debug.write("Adding arcs: %d files, %d arcs total" % (
+                len(arc_data), sum(len(arcs) for arcs in arc_data.values())
+            ))
+        if self._has_lines():
+            raise CoverageException("Can't add arcs to existing line data")
+
+        if self._arcs is None:
+            self._arcs = {}
+        for filename, arcs in iitems(arc_data):
+            if filename in self._arcs:
+                new_arcs = set(self._arcs[filename])
+                new_arcs.update(arcs)
+                arcs = new_arcs
+            self._arcs[filename] = list(arcs)
+
+        self._validate()
+
+    def add_file_tracers(self, file_tracers):
+        """Add per-file plugin information.
+
+        `file_tracers` is { filename: plugin_name, ... }
+
+        """
+        if self._debug and self._debug.should('dataop'):
+            self._debug.write("Adding file tracers: %d files" % (len(file_tracers),))
 
-    def usefile(self, use_file=True):
-        """Set whether or not to use a disk file for data."""
-        self.use_file = use_file
+        existing_files = self._arcs or self._lines or {}
+        for filename, plugin_name in iitems(file_tracers):
+            if filename not in existing_files:
+                raise CoverageException(
+                    "Can't add file tracer data for unmeasured file '%s'" % (filename,)
+                )
+            existing_plugin = self._file_tracers.get(filename)
+            if existing_plugin is not None and plugin_name != existing_plugin:
+                raise CoverageException(
+                    "Conflicting file tracer name for '%s': %r vs %r" % (
+                        filename, existing_plugin, plugin_name,
+                    )
+                )
+            self._file_tracers[filename] = plugin_name
+
+        self._validate()
+
+    def add_run_info(self, **kwargs):
+        """Add information about the run.
+
+        Keywords are arbitrary, and are stored in the run dictionary. Values
+        must be JSON serializable.  You may use this function more than once,
+        but repeated keywords overwrite each other.
+
+        """
+        if self._debug and self._debug.should('dataop'):
+            self._debug.write("Adding run info: %r" % (kwargs,))
+        if not self._runs:
+            self._runs = [{}]
+        self._runs[0].update(kwargs)
+        self._validate()
+
+    def touch_file(self, filename):
+        """Ensure that `filename` appears in the data, empty if needed."""
+        if self._debug and self._debug.should('dataop'):
+            self._debug.write("Touching %r" % (filename,))
+        if not self._has_arcs() and not self._has_lines():
+            raise CoverageException("Can't touch files in an empty CoverageData")
+
+        if self._has_arcs():
+            where = self._arcs
+        else:
+            where = self._lines
+        where.setdefault(filename, [])
+
+        self._validate()
+
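For example (the keyword names are arbitrary, as the ``add_run_info`` docstring allows, and the file name is made up)::

    data = CoverageData()
    data.add_lines({"prog.py": {1: None}})
    data.add_run_info(note="nightly build", branch=False)
    # touch_file() needs some existing data; the file then appears in the
    # data with no measured lines.
    data.touch_file("never_imported.py")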
+    def write_fileobj(self, file_obj):
+        """Write the coverage data to `file_obj`."""
+
+        # Create the file data.
+        file_data = {}
+
+        if self._has_arcs():
+            file_data['arcs'] = dict(
+                (fname.decode(sys.getfilesystemencoding()),
+                    [tuple(pair) for pair in arcs])
+                for fname, arcs in iitems(self._arcs)
+            )
+
+        if self._has_lines():
+            file_data['lines'] = dict(
+                (fname.decode(sys.getfilesystemencoding()), linenos)
+                for fname, linenos in iitems(self._lines)
+            )
+
+        if self._file_tracers:
+            file_data['file_tracers'] = self._file_tracers
+
+        if self._runs:
+            file_data['runs'] = self._runs
+
+        # Write the data to the file.
+        file_obj.write(self._GO_AWAY)
+        json.dump(file_data, file_obj)
+
+    def write_file(self, filename):
+        """Write the coverage data to `filename`."""
+        if self._debug and self._debug.should('dataio'):
+            self._debug.write("Writing data to %r" % (filename,))
+        with open(filename, 'w') as fdata:
+            self.write_fileobj(fdata)
+
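A sketch of writing a data file with the two methods above (the file name is arbitrary)::

    data = CoverageData()
    data.add_lines({"prog.py": {1: None, 2: None}})
    data.write_file(".coverage.example")
    # The file now contains the _GO_AWAY marker string followed by a JSON
    # object along the lines of {"lines": {"prog.py": [1, 2]}}.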
+    def erase(self):
+        """Erase the data in this object."""
+        self._lines = None
+        self._arcs = None
+        self._file_tracers = {}
+        self._runs = []
+        self._validate()
+
+    def update(self, other_data, aliases=None):
+        """Update this data with data from another `CoverageData`.
+
+        If `aliases` is provided, it's a `PathAliases` object that is used to
+        re-map paths to match the local machine's.
+
+        """
+        if self._has_lines() and other_data._has_arcs():
+            raise CoverageException("Can't combine arc data with line data")
+        if self._has_arcs() and other_data._has_lines():
+            raise CoverageException("Can't combine line data with arc data")
+
+        aliases = aliases or PathAliases()
+
+        # _file_tracers: each file has just one plugin name string, so the
+        # two sides have to agree.
+        # Have to do these first, so that our examination of self._arcs and
+        # self._lines won't be confused by data updated from other_data.
+        for filename in other_data.measured_files():
+            other_plugin = other_data.file_tracer(filename)
+            filename = aliases.map(filename)
+            this_plugin = self.file_tracer(filename)
+            if this_plugin is None:
+                if other_plugin:
+                    self._file_tracers[filename] = other_plugin
+            elif this_plugin != other_plugin:
+                raise CoverageException(
+                    "Conflicting file tracer name for '%s': %r vs %r" % (
+                        filename, this_plugin, other_plugin,
+                    )
+                )
+
+        # _runs: add the new runs to these runs.
+        self._runs.extend(other_data._runs)
 
-    def read(self):
-        """Read coverage data from the coverage data file (if it exists)."""
-        if self.use_file:
-            self.lines, self.arcs = self._read_file(self.filename)
+        # _lines: merge dicts.
+        if other_data._has_lines():
+            if self._lines is None:
+                self._lines = {}
+            for filename, file_lines in iitems(other_data._lines):
+                filename = aliases.map(filename)
+                if filename in self._lines:
+                    lines = set(self._lines[filename])
+                    lines.update(file_lines)
+                    file_lines = list(lines)
+                self._lines[filename] = file_lines
+
+        # _arcs: merge dicts.
+        if other_data._has_arcs():
+            if self._arcs is None:
+                self._arcs = {}
+            for filename, file_arcs in iitems(other_data._arcs):
+                filename = aliases.map(filename)
+                if filename in self._arcs:
+                    arcs = set(self._arcs[filename])
+                    arcs.update(file_arcs)
+                    file_arcs = list(arcs)
+                self._arcs[filename] = file_arcs
+
+        self._validate()
+
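A sketch of merging data recorded on another machine (assuming ``PathAliases`` comes from ``coverage.files``, as in upstream coverage.py; the paths are made up)::

    from coverage.data import CoverageData
    from coverage.files import PathAliases

    local = CoverageData()
    local.add_lines({"/home/me/proj/mod.py": {1: None}})

    remote = CoverageData()
    remote.add_lines({"/ci/build/proj/mod.py": {2: None}})

    aliases = PathAliases()
    aliases.add("/ci/build/proj", "/home/me/proj")   # re-map remote paths
    local.update(remote, aliases=aliases)
    # mod.py is now recorded once, under its local path, with both lines.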
+    ##
+    ## Miscellaneous
+    ##
+
+    def _validate(self):
+        """If we are in paranoid mode, validate that everything is right."""
+        if env.TESTING:
+            self._validate_invariants()
+
+    def _validate_invariants(self):
+        """Validate internal invariants."""
+        # Only one of _lines or _arcs should exist.
+        assert not(self._has_lines() and self._has_arcs()), (
+            "Shouldn't have both _lines and _arcs"
+        )
+
+        # _lines should be a dict of lists of ints.
+        if self._has_lines():
+            for fname, lines in iitems(self._lines):
+                assert isinstance(fname, string_class), "Key in _lines shouldn't be %r" % (fname,)
+                assert all(isinstance(x, int) for x in lines), (
+                    "_lines[%r] shouldn't be %r" % (fname, lines)
+                )
+
+        # _arcs should be a dict of lists of pairs of ints.
+        if self._has_arcs():
+            for fname, arcs in iitems(self._arcs):
+                assert isinstance(fname, string_class), "Key in _arcs shouldn't be %r" % (fname,)
+                assert all(isinstance(x, int) and isinstance(y, int) for x, y in arcs), (
+                    "_arcs[%r] shouldn't be %r" % (fname, arcs)
+                )
+
+        # _file_tracers should have only non-empty strings as values.
+        for fname, plugin in iitems(self._file_tracers):
+            assert isinstance(fname, string_class), (
+                "Key in _file_tracers shouldn't be %r" % (fname,)
+            )
+            assert plugin and isinstance(plugin, string_class), (
+                "_file_tracers[%r] shouldn't be %r" % (fname, plugin)
+            )
+
+        # _runs should be a list of dicts.
+        for val in self._runs:
+            assert isinstance(val, dict)
+            for key in val:
+                assert isinstance(key, string_class), "Key in _runs shouldn't be %r" % (key,)
+
+    def add_to_hash(self, filename, hasher):
+        """Contribute `filename`'s data to the `hasher`.
+
+        `hasher` is a :class:`coverage.misc.Hasher` instance to be updated with
+        the file's data.  It should only get the results data, not the run
+        data.
+
+        """
+        if self._has_arcs():
+            hasher.update(sorted(self.arcs(filename) or []))
         else:
-            self.lines, self.arcs = {}, {}
+            hasher.update(sorted(self.lines(filename) or []))
+        hasher.update(self.file_tracer(filename))
+
+    ##
+    ## Internal
+    ##
+
+    def _has_lines(self):
+        """Do we have data in self._lines?"""
+        return self._lines is not None
+
+    def _has_arcs(self):
+        """Do we have data in self._arcs?"""
+        return self._arcs is not None
+
+
+class CoverageDataFiles(object):
+    """Manage the use of coverage data files."""
+
+    def __init__(self, basename=None):
+        """Create a CoverageDataFiles to manage data files.
+
+        `basename` is the name of the file to use for storing data.
 
-    def write(self, suffix=None):
+        """
+        # Construct the file name that will be used for data storage.
+        self.filename = os.path.abspath(basename or ".coverage")
+
+    def erase(self, parallel=False):
+        """Erase the data from the file storage.
+
+        If `parallel` is true, data files created from the basename by
+        parallel-mode are also deleted.
+
+        """
+        file_be_gone(self.filename)
+        if parallel:
+            data_dir, local = os.path.split(self.filename)
+            localdot = local + '.*'
+            pattern = os.path.join(os.path.abspath(data_dir), localdot)
+            for filename in glob.glob(pattern):
+                file_be_gone(filename)
+
+    def read(self, data):
+        """Read the coverage data."""
+        if os.path.exists(self.filename):
+            data.read_file(self.filename)
+
+    def write(self, data, suffix=None):
         """Write the collected coverage data to a file.
 
         `suffix` is a suffix to append to the base file name. This can be used
@@ -85,102 +645,27 @@
         the suffix.
 
         """
-        if self.use_file:
-            filename = self.filename
-            if suffix:
-                filename += "." + suffix
-            self.write_file(filename)
-
-    def erase(self):
-        """Erase the data, both in this object, and from its file storage."""
-        if self.use_file:
-            if self.filename:
-                file_be_gone(self.filename)
-        self.lines = {}
-        self.arcs = {}
-
-    def line_data(self):
-        """Return the map from filenames to lists of line numbers executed."""
-        return dict(
-            [(f.decode(sys.getfilesystemencoding()), sorted(lmap.keys()))
-                for f, lmap in iitems(self.lines)]
-            )
-
-    def arc_data(self):
-        """Return the map from filenames to lists of line number pairs."""
-        return dict(
-            [(f.decode(sys.getfilesystemencoding()), sorted(amap.keys()))
-                for f, amap in iitems(self.arcs)]
+        filename = self.filename
+        if suffix is True:
+            # If data_suffix was a simple true value, then make a suffix with
+            # plenty of distinguishing information.  We do this here in
+            # `write()` at the last minute so that the pid will be correct even
+            # if the process forks.
+            extra = ""
+            if _TEST_NAME_FILE:                             # pragma: debugging
+                with open(_TEST_NAME_FILE) as f:
+                    test_name = f.read()
+                extra = "." + test_name
+            suffix = "%s%s.%s.%06d" % (
+                socket.gethostname(), extra, os.getpid(),
+                random.randint(0, 999999)
             )
 
-    def write_file(self, filename):
-        """Write the coverage data to `filename`."""
-
-        # Create the file data.
-        data = {}
-
-        data['lines'] = self.line_data()
-        arcs = self.arc_data()
-        if arcs:
-            data['arcs'] = arcs
-
-        if self.collector:
-            data['collector'] = self.collector
-
-        if self.debug and self.debug.should('dataio'):
-            self.debug.write("Writing data to %r" % (filename,))
-
-        # Write the pickle to the file.
-        fdata = open(filename, 'wb')
-        try:
-            pickle.dump(data, fdata, 2)
-        finally:
-            fdata.close()
-
-    def read_file(self, filename):
-        """Read the coverage data from `filename`."""
-        self.lines, self.arcs = self._read_file(filename)
+        if suffix:
+            filename += "." + suffix
+        data.write_file(filename)
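A sketch of parallel-mode naming with ``write(data, suffix=True)`` (the resulting file name varies by host, pid and random number)::

    data = CoverageData()
    data.add_lines({"prog.py": {1: None}})

    data_files = CoverageDataFiles()        # basename defaults to ".coverage"
    data_files.write(data, suffix=True)
    # Writes something like ".coverage.myhost.12345.678901", so several
    # processes can write data files side by side without colliding.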
 
-    def raw_data(self, filename):
-        """Return the raw pickled data from `filename`."""
-        if self.debug and self.debug.should('dataio'):
-            self.debug.write("Reading data from %r" % (filename,))
-        fdata = open(filename, 'rb')
-        try:
-            data = pickle.load(fdata)
-        finally:
-            fdata.close()
-        return data
-
-    def _read_file(self, filename):
-        """Return the stored coverage data from the given file.
-
-        Returns two values, suitable for assigning to `self.lines` and
-        `self.arcs`.
-
-        """
-        lines = {}
-        arcs = {}
-        try:
-            data = self.raw_data(filename)
-            if isinstance(data, dict):
-                # Unpack the 'lines' item.
-                lines = dict([
-                    (f.encode(sys.getfilesystemencoding()),
-                        dict.fromkeys(linenos, None))
-                        for f, linenos in iitems(data.get('lines', {}))
-                    ])
-                # Unpack the 'arcs' item.
-                arcs = dict([
-                    (f.encode(sys.getfilesystemencoding()),
-                        dict.fromkeys(arcpairs, None))
-                        for f, arcpairs in iitems(data.get('arcs', {}))
-                    ])
-        except Exception:
-            pass
-        return lines, arcs
-
-    def combine_parallel_data(self, aliases=None):
+    def combine_parallel_data(self, data, aliases=None, data_paths=None):
         """Combine a number of data files together.
 
         Treat `self.filename` as a file prefix, and combine the data from all
@@ -189,98 +674,87 @@
         If `aliases` is provided, it's a `PathAliases` object that is used to
         re-map paths to match the local machine's.
 
-        """
-        aliases = aliases or PathAliases()
-        data_dir, local = os.path.split(self.filename)
-        localdot = local + '.'
-        for f in os.listdir(data_dir or '.'):
-            if f.startswith(localdot):
-                full_path = os.path.join(data_dir, f)
-                new_lines, new_arcs = self._read_file(full_path)
-                for filename, file_data in iitems(new_lines):
-                    filename = aliases.map(filename)
-                    self.lines.setdefault(filename, {}).update(file_data)
-                for filename, file_data in iitems(new_arcs):
-                    filename = aliases.map(filename)
-                    self.arcs.setdefault(filename, {}).update(file_data)
-                if f != local:
-                    os.remove(full_path)
+        If `data_paths` is provided, it is a list of directories or files to
+        combine.  Directories are searched for files that start with
+        `self.filename` plus dot as a prefix, and those files are combined.
 
-    def add_line_data(self, line_data):
-        """Add executed line data.
-
-        `line_data` is { filename: { lineno: None, ... }, ...}
+        If `data_paths` is not provided, then the directory portion of
+        `self.filename` is used as the directory to search for data files.
 
-        """
-        for filename, linenos in iitems(line_data):
-            self.lines.setdefault(filename, {}).update(linenos)
-
-    def add_arc_data(self, arc_data):
-        """Add measured arc data.
-
-        `arc_data` is { filename: { (l1,l2): None, ... }, ...}
+        Every data file found and combined is then deleted from disk.
 
         """
-        for filename, arcs in iitems(arc_data):
-            self.arcs.setdefault(filename, {}).update(arcs)
+        # Because of the os.path.abspath in the constructor, data_dir will
+        # never be an empty string.
+        data_dir, local = os.path.split(self.filename)
+        localdot = local + '.*'
 
-    def touch_file(self, filename):
-        """Ensure that `filename` appears in the data, empty if needed."""
-        self.lines.setdefault(filename, {})
-
-    def measured_files(self):
-        """A list of all files that had been measured."""
-        return list(self.lines.keys())
+        data_paths = data_paths or [data_dir]
+        files_to_combine = []
+        for p in data_paths:
+            if os.path.isfile(p):
+                files_to_combine.append(os.path.abspath(p))
+            elif os.path.isdir(p):
+                pattern = os.path.join(os.path.abspath(p), localdot)
+                files_to_combine.extend(glob.glob(pattern))
+            else:
+                raise CoverageException("Couldn't combine from non-existent path '%s'" % (p,))
 
-    def executed_lines(self, filename):
-        """A map containing all the line numbers executed in `filename`.
+        for f in files_to_combine:
+            new_data = CoverageData()
+            new_data.read_file(f)
+            data.update(new_data, aliases=aliases)
+            file_be_gone(f)
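A sketch of combining (the worker directories are made-up examples and must exist, or a CoverageException is raised)::

    combined = CoverageData()
    data_files = CoverageDataFiles()
    # Fold in every ".coverage.*" file from the given directories; each
    # file is deleted after its data has been merged into `combined`.
    data_files.combine_parallel_data(
        combined, data_paths=["build/worker1", "build/worker2"])
    data_files.write(combined)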
 
-        If `filename` hasn't been collected at all (because it wasn't executed)
-        then return an empty map.
 
-        """
-        return self.lines.get(filename) or {}
+def canonicalize_json_data(data):
+    """Canonicalize our JSON data so it can be compared."""
+    for fname, lines in iitems(data.get('lines', {})):
+        data['lines'][fname] = sorted(lines)
+    for fname, arcs in iitems(data.get('arcs', {})):
+        data['arcs'][fname] = sorted(arcs)
 
-    def executed_arcs(self, filename):
-        """A map containing all the arcs executed in `filename`."""
-        return self.arcs.get(filename) or {}
+
+def pretty_data(data):
+    """Format data as JSON, but as nicely as possible.
 
-    def add_to_hash(self, filename, hasher):
-        """Contribute `filename`'s data to the Md5Hash `hasher`."""
-        hasher.update(self.executed_lines(filename))
-        hasher.update(self.executed_arcs(filename))
+    Returns a string.
 
-    def summary(self, fullpath=False):
-        """Return a dict summarizing the coverage data.
+    """
+    # Start with a basic JSON dump.
+    out = json.dumps(data, indent=4, sort_keys=True)
+    # But pairs of numbers shouldn't be split across lines...
+    out = re.sub(r"\[\s+(-?\d+),\s+(-?\d+)\s+]", r"[\1, \2]", out)
+    # Trailing spaces mess with tests, get rid of them.
+    out = re.sub(r"(?m)\s+$", "", out)
+    return out
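For instance::

    raw = {"arcs": {"prog.py": [[1, 2], [2, -1]]}}
    print(pretty_data(raw))
    # json.dumps(..., indent=4) would put each number of a pair on its own
    # line; the first regex folds pairs back to "[1, 2]" and "[2, -1]", and
    # the second strips trailing spaces so the output is stable in tests.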
 
-        Keys are based on the filenames, and values are the number of executed
-        lines.  If `fullpath` is true, then the keys are the full pathnames of
-        the files, otherwise they are the basenames of the files.
+
+def debug_main(args):
+    """Dump the raw data from data files.
+
+    Run this as::
 
-        """
-        summ = {}
-        if fullpath:
-            filename_fn = lambda f: f
-        else:
-            filename_fn = os.path.basename
-        for filename, lines in iitems(self.lines):
-            summ[filename_fn(filename)] = len(lines)
-        return summ
+        $ python -m coverage.data [FILE]
 
-    def has_arcs(self):
-        """Does this data have arcs?"""
-        return bool(self.arcs)
+    """
+    parser = optparse.OptionParser()
+    parser.add_option(
+        "-c", "--canonical", action="store_true",
+        help="Sort data into a canonical order",
+    )
+    options, args = parser.parse_args(args)
+
+    for filename in (args or [".coverage"]):
+        print("--- {0} ------------------------------".format(filename))
+        data = CoverageData._read_raw_data_file(filename)
+        if options.canonical:
+            canonicalize_json_data(data)
+        print(pretty_data(data))
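The same dump can be produced programmatically (the data file name is an arbitrary example)::

    # Equivalent to: python -m coverage.data --canonical .coverage.worker1
    debug_main(["--canonical", ".coverage.worker1"])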
 
 
 if __name__ == '__main__':
-    # Ad-hoc: show the raw data in a data file.
-    import pprint, sys
-    covdata = CoverageData()
-    if sys.argv[1:]:
-        fname = sys.argv[1]
-    else:
-        fname = covdata.filename
-    pprint.pprint(covdata.raw_data(fname))
+    debug_main(sys.argv[1:])
 
 #
 # eflag: FileType = Python2
--- a/DebugClients/Python/coverage/debug.py	Sat Oct 10 12:06:10 2015 +0200
+++ b/DebugClients/Python/coverage/debug.py	Sat Oct 10 12:44:52 2015 +0200
@@ -1,6 +1,11 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
 """Control of and utilities for debugging."""
 
+import inspect
 import os
+import sys
 
 
 # When debugging, it can be helpful to force some options, especially when
@@ -8,6 +13,9 @@
 # This is a list of forced debugging options.
 FORCED_DEBUG = []
 
+# A hack for debugging tests that run in sub-processes.
+_TEST_NAME_FILE = ""    # "/tmp/covtest.txt"
+
 
 class DebugControl(object):
     """Control and output for debugging."""
@@ -17,6 +25,9 @@
         self.options = options
         self.output = output
 
+    def __repr__(self):
+        return "<DebugControl options=%r output=%r>" % (self.options, self.output)
+
     def should(self, option):
         """Decide whether to output debug information in category `option`."""
         return (option in self.options or option in FORCED_DEBUG)
@@ -26,14 +37,22 @@
         if self.should('pid'):
             msg = "pid %5d: %s" % (os.getpid(), msg)
         self.output.write(msg+"\n")
+        if self.should('callers'):
+            dump_stack_frames(self.output)
         self.output.flush()
 
-    def write_formatted_info(self, info):
+    def write_formatted_info(self, header, info):
         """Write a sequence of (label,data) pairs nicely."""
+        self.write(info_header(header))
         for line in info_formatter(info):
             self.write(" %s" % line)
 
 
+def info_header(label):
+    """Make a nice header string."""
+    return "--{0:-<60s}".format(" "+label+" ")
+
+
 def info_formatter(info):
     """Produce a sequence of formatted lines from info.
 
@@ -41,11 +60,14 @@
     nicely formatted, ready to print.
 
     """
-    label_len = max([len(l) for l, _d in info])
+    info = list(info)
+    if not info:
+        return
+    label_len = max(len(l) for l, _d in info)
     for label, data in info:
         if data == []:
             data = "-none-"
-        if isinstance(data, (list, tuple)):
+        if isinstance(data, (list, set, tuple)):
             prefix = "%*s:" % (label_len, label)
             for e in data:
                 yield "%*s %s" % (label_len+1, prefix, e)
@@ -53,5 +75,29 @@
         else:
             yield "%*s: %s" % (label_len, label, data)
 
+
+def short_stack():                                          # pragma: debugging
+    """Return a string summarizing the call stack.
+
+    The string is multi-line, with one line per stack frame. Each line shows
+    the function name, the file name, and the line number:
+
+        ...
+        start_import_stop : /Users/ned/coverage/trunk/tests/coveragetest.py @95
+        import_local_file : /Users/ned/coverage/trunk/tests/coveragetest.py @81
+        import_local_file : /Users/ned/coverage/trunk/coverage/backward.py @159
+        ...
+
+    """
+    stack = inspect.stack()[:0:-1]
+    return "\n".join("%30s : %s @%d" % (t[3], t[1], t[2]) for t in stack)
+
+
+def dump_stack_frames(out=None):                            # pragma: debugging
+    """Print a summary of the stack to stdout, or some place else."""
+    out = out or sys.stdout
+    out.write(short_stack())
+    out.write("\n")
+
 #
 # eflag: FileType = Python2
--- a/DebugClients/Python/coverage/doc/AUTHORS.txt	Sat Oct 10 12:06:10 2015 +0200
+++ b/DebugClients/Python/coverage/doc/AUTHORS.txt	Sat Oct 10 12:44:52 2015 +0200
@@ -3,41 +3,66 @@
 
 Other contributions have been made by:
 
-Marc Abramowitz
+Adi Roiban
+Alex Gaynor
+Alexander Todorov
+Anthony Sottile
+Ben Finney
+Bill Hart
+Brandon Rhodes
+Brett Cannon
+Buck Evan
+Carl Gieringer
+Catherine Proulx
 Chris Adams
-Geoff Bache
-Julian Berman
-Titus Brown
-Brett Cannon
-Pablo Carballo
-Guillaume Chazarain
+Chris Rose
+Christian Heimes
+Christine Lytwynec
+Christoph Zwerschke
+Conrad Ho
+Danek Duvall
+Danny Allen
 David Christian
-Marcus Cobden
-Matthew Desmarais
-Danek Duvall
-Ben Finney
-Martin Fuzzey
-Imri Goldberg
-Bill Hart
-Christian Heimes
-Roger Hu
+David Stanek
+Detlev Offenbach
 Devin Jeanpierre
-Ross Lawley
+Dmitry Trofimov
+Eduardo Schettino
 Edward Loper
-Sandra Martocchia
-Patrick Mezard
-Noel O'Boyle
-Detlev Offenbach
-JT Olds
+Geoff Bache
 George Paci
-Catherine Proulx
-Brandon Rhodes
-Adi Roiban
+George Song
 Greg Rogers
-George Song
-David Stanek
+Guillaume Chazarain
+Imri Goldberg
+Ionel Cristian Mărieș
+JT Olds
+Jessamyn Smith
+Jon Chappell
 Joseph Tate
+Julian Berman
+Krystian Kichewko
+Leonardo Pistone
+Lex Berezhny
+Marc Abramowitz
+Marcus Cobden
+Mark van der Wal
+Martin Fuzzey
+Matthew Desmarais
+Mickie Betz
+Noel O'Boyle
+Pablo Carballo
+Patrick Mezard
+Peter Portante
+Roger Hu
+Ross Lawley
+Sandra Martocchia
 Sigve Tjora
-Mark van der Wal
+Stan Hu
+Stefan Behnel
+Steve Leonard
+Steve Peak
+Ted Wexler
+Titus Brown
+Yury Selivanov
 Zooko Wilcox-O'Hearn
-Christoph Zwerschke
--- a/DebugClients/Python/coverage/doc/CHANGES.txt	Sat Oct 10 12:06:10 2015 +0200
+++ b/DebugClients/Python/coverage/doc/CHANGES.txt	Sat Oct 10 12:44:52 2015 +0200
@@ -1,9 +1,413 @@
-------------------------------
+.. Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+.. For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
+==============================
 Change history for Coverage.py
+==============================
+
+
+Version 4.0 --- 20 September 2015
+---------------------------------
+
+No changes from 4.0b3
+
+
+Version 4.0b3 --- 7 September 2015
+----------------------------------
+
+- Reporting on an unmeasured file would fail with a traceback.  This is now
+  fixed, closing `issue 403`_.
+
+- The Jenkins ShiningPanda plugin looks for an obsolete file name to find the
+  HTML reports to publish, so it was failing under coverage.py 4.0.  Now we
+  create that file if we are running under Jenkins, to keep things working
+  smoothly. `issue 404`_.
+
+- Kits used to include tests and docs, but didn't install them anywhere, or
+  provide all of the supporting tools to make them useful.  Kits no longer
+  include tests and docs.  If you were using them from the older packages, get
+  in touch and help me understand how.
+
+.. _issue 403: https://bitbucket.org/ned/coveragepy/issues/403/hasherupdate-fails-with-typeerror-nonetype
+.. _issue 404: https://bitbucket.org/ned/coveragepy/issues/404/shiningpanda-jenkins-plugin-cant-find-html
+
+
+
+Version 4.0b2 --- 22 August 2015
+--------------------------------
+
+- 4.0b1 broke --append creating new data files.  This is now fixed, closing
+  `issue 392`_.
+
+- ``py.test --cov`` can write empty data, then touch files due to ``--source``,
+  which made coverage.py mistakenly force the data file to record lines instead
+  of arcs.  This would lead to a "Can't combine line data with arc data" error
+  message.  This is now fixed, and changed some method names in the
+  CoverageData interface.  Fixes `issue 399`_.
+
+- `CoverageData.read_fileobj` and `CoverageData.write_fileobj` replace the
+  `.read` and `.write` methods, and are now properly inverses of each other.
+
+- When using ``report --skip-covered``, a message will now be included in the
+  report output indicating how many files were skipped, and if all files are
+  skipped, coverage.py won't accidentally scold you for having no data to
+  report.  Thanks, Krystian Kichewko.
+
+- A new conversion utility has been added:  ``python -m coverage.pickle2json``
+  will convert v3.x pickle data files to v4.x JSON data files.  Thanks,
+  Alexander Todorov.  Closes `issue 395`_.
+
+- A new version identifier is available, `coverage.version_info`, a plain tuple
+  of values similar to `sys.version_info`_.
+
+.. _issue 392: https://bitbucket.org/ned/coveragepy/issues/392/run-append-doesnt-create-coverage-file
+.. _issue 395: https://bitbucket.org/ned/coveragepy/issues/395/rfe-read-pickled-files-as-well-for
+.. _issue 399: https://bitbucket.org/ned/coveragepy/issues/399/coverageexception-cant-combine-line-data
+.. _sys.version_info: https://docs.python.org/3/library/sys.html#sys.version_info
+
+
+Version 4.0b1 --- 2 August 2015
+-------------------------------
+
+- Coverage.py is now licensed under the Apache 2.0 license.  See NOTICE.txt for
+  details.  Closes `issue 313`_.
+
+- The data storage has been completely revamped.  The data file is now
+  JSON-based instead of a pickle, closing `issue 236`_.  The `CoverageData`
+  class is now a public supported documented API to the data file.
+
+- A new configuration option, ``[run] note``, lets you set a note that will be
+  stored in the `runs` section of the data file.  You can use this to annotate
+  the data file with any information you like.
+
+- Unrecognized configuration options will now print an error message and stop
+  coverage.py.  This should help prevent configuration mistakes from passing
+  silently.  Finishes `issue 386`_.
+
+- In parallel mode, ``coverage erase`` will now delete all of the data files,
+  fixing `issue 262`_.
+
+- Coverage.py now accepts a directory name for ``coverage run`` and will run a
+  ``__main__.py`` found there, just like Python will.  Fixes `issue 252`_.
+  Thanks, Dmitry Trofimov.
+
+- The XML report now includes a ``missing-branches`` attribute.  Thanks, Steve
+  Peak.  This is not a part of the Cobertura DTD, so the XML report no longer
+  references the DTD.
+
+- Missing branches in the HTML report now have a bit more information in the
+  right-hand annotations.  Hopefully this will make their meaning clearer.
+
+- All the reporting functions now behave the same if no data had been
+  collected, exiting with a status code of 1.  Fixed ``fail_under`` to be
+  applied even when the report is empty.  Thanks, Ionel Cristian Mărieș.
+
+- Plugins are now initialized differently.  Instead of looking for a class
+  called ``Plugin``, coverage.py looks for a function called ``coverage_init``.
+
+- A file-tracing plugin can now ask to have built-in Python reporting by
+  returning `"python"` from its `file_reporter()` method.
+
+- Code that was executed with `exec` would be mis-attributed to the file that
+  called it.  This is now fixed, closing `issue 380`_.
+
+- The ability to use item access on `Coverage.config` (introduced in 4.0a2) has
+  been changed to a more explicit `Coverage.get_option` and
+  `Coverage.set_option` API.
+
+- The ``Coverage.use_cache`` method is no longer supported.
+
+- The private method ``Coverage._harvest_data`` is now called
+  ``Coverage.get_data``, and returns the ``CoverageData`` containing the
+  collected data.
+
+- The project is consistently referred to as "coverage.py" throughout the code
+  and the documentation, closing `issue 275`_.
+
+- Combining data files with an explicit configuration file was broken in 4.0a6,
+  but now works again, closing `issue 385`_.
+
+- ``coverage combine`` now accepts files as well as directories.
+
+- The speed is back to 3.7.1 levels, after having slowed down due to plugin
+  support, finishing up `issue 387`_.
+
+.. _issue 236: https://bitbucket.org/ned/coveragepy/issues/236/pickles-are-bad-and-you-should-feel-bad
+.. _issue 252: https://bitbucket.org/ned/coveragepy/issues/252/coverage-wont-run-a-program-with
+.. _issue 262: https://bitbucket.org/ned/coveragepy/issues/262/when-parallel-true-erase-should-erase-all
+.. _issue 275: https://bitbucket.org/ned/coveragepy/issues/275/refer-consistently-to-project-as-coverage
+.. _issue 313: https://bitbucket.org/ned/coveragepy/issues/313/add-license-file-containing-2-3-or-4
+.. _issue 380: https://bitbucket.org/ned/coveragepy/issues/380/code-executed-by-exec-excluded-from
+.. _issue 385: https://bitbucket.org/ned/coveragepy/issues/385/coverage-combine-doesnt-work-with-rcfile
+.. _issue 386: https://bitbucket.org/ned/coveragepy/issues/386/error-on-unrecognised-configuration
+.. _issue 387: https://bitbucket.org/ned/coveragepy/issues/387/performance-degradation-from-371-to-40
+
+.. 40 issues closed in 4.0 below here
+
+
+Version 4.0a6 --- 21 June 2015
 ------------------------------
 
-3.7.1 -- 13 December 2013
--------------------------
+- Python 3.5b2 and PyPy 2.6.0 are supported.
+
+- The original module-level function interface to coverage.py is no longer
+  supported.  You must now create a ``coverage.Coverage`` object, and use
+  methods on it.
+
+- The ``coverage combine`` command now accepts any number of directories as
+  arguments, and will combine all the data files from those directories.  This
+  means you don't have to copy the files to one directory before combining.
+  Thanks, Christine Lytwynec.  Finishes `issue 354`_.
+
+- Branch coverage couldn't properly handle certain extremely long files. This
+  is now fixed (`issue 359`_).
+
+- Branch coverage didn't understand yield statements properly.  Mickie Betz
+  persisted in pursuing this despite Ned's pessimism.  Fixes `issue 308`_ and
+  `issue 324`_.
+
+- The COVERAGE_DEBUG environment variable can be used to set the ``[run] debug``
+  configuration option to control what internal operations are logged.
+
+- HTML reports were truncated at formfeed characters.  This is now fixed
+  (`issue 360`_).  It's always fun when the problem is due to a `bug in the
+  Python standard library <http://bugs.python.org/issue19035>`_.
+
+- Files with incorrect encoding declaration comments are no longer ignored by
+  the reporting commands, fixing `issue 351`_.
+
+- HTML reports now include a timestamp in the footer, closing `issue 299`_.
+  Thanks, Conrad Ho.
+
+- HTML reports now begrudgingly use double-quotes rather than single quotes,
+  because there are "software engineers" out there writing tools that read HTML
+  and somehow have no idea that single quotes exist.  Capitulates to the absurd
+  `issue 361`_.  Thanks, Jon Chappell.
+
+- The ``coverage annotate`` command now handles non-ASCII characters properly,
+  closing `issue 363`_.  Thanks, Leonardo Pistone.
+
+- Drive letters on Windows were not normalized correctly, now they are. Thanks,
+  Ionel Cristian Mărieș.
+
+- Plugin support had some bugs fixed, closing `issue 374`_ and `issue 375`_.
+  Thanks, Stefan Behnel.
+
+.. _issue 299: https://bitbucket.org/ned/coveragepy/issue/299/inserted-created-on-yyyy-mm-dd-hh-mm-in
+.. _issue 308: https://bitbucket.org/ned/coveragepy/issue/308/yield-lambda-branch-coverage
+.. _issue 324: https://bitbucket.org/ned/coveragepy/issue/324/yield-in-loop-confuses-branch-coverage
+.. _issue 351: https://bitbucket.org/ned/coveragepy/issue/351/files-with-incorrect-encoding-are-ignored
+.. _issue 354: https://bitbucket.org/ned/coveragepy/issue/354/coverage-combine-should-take-a-list-of
+.. _issue 359: https://bitbucket.org/ned/coveragepy/issue/359/xml-report-chunk-error
+.. _issue 360: https://bitbucket.org/ned/coveragepy/issue/360/html-reports-get-confused-by-l-in-the-code
+.. _issue 361: https://bitbucket.org/ned/coveragepy/issue/361/use-double-quotes-in-html-output-to
+.. _issue 363: https://bitbucket.org/ned/coveragepy/issue/363/annotate-command-hits-unicode-happy-fun
+.. _issue 374: https://bitbucket.org/ned/coveragepy/issue/374/c-tracer-lookups-fail-in
+.. _issue 375: https://bitbucket.org/ned/coveragepy/issue/375/ctracer_handle_return-reads-byte-code
+
+
+Version 4.0a5 --- 16 February 2015
+----------------------------------
+
+- Plugin support is now implemented in the C tracer instead of the Python
+  tracer. This greatly improves the speed of tracing projects using plugins.
+
+- Coverage.py now always adds the current directory to sys.path, so that
+  plugins can import files in the current directory (`issue 358`_).
+
+- If the `config_file` argument to the Coverage constructor is specified as
+  ".coveragerc", it is treated as if it were True.  This means setup.cfg is
+  also examined, and a missing file is not considered an error (`issue 357`_).
+
+- Wildly experimental: support for measuring processes started by the
+  multiprocessing module.  To use, set ``--concurrency=multiprocessing``,
+  either on the command line or in the .coveragerc file (`issue 117`_). Thanks,
+  Eduardo Schettino.  Currently, this does not work on Windows.
+
+- A new warning is possible, if a desired file isn't measured because it was
+  imported before coverage.py was started (`issue 353`_).
+
+- The `coverage.process_startup` function now will start coverage measurement
+  only once, no matter how many times it is called.  This fixes problems due
+  to unusual virtualenv configurations (`issue 340`_).
+
+- Added 3.5.0a1 to the list of supported CPython versions.
+
+.. _issue 117: https://bitbucket.org/ned/coveragepy/issue/117/enable-coverage-measurement-of-code-run-by
+.. _issue 340: https://bitbucket.org/ned/coveragepy/issue/340/keyerror-subpy
+.. _issue 353: https://bitbucket.org/ned/coveragepy/issue/353/40a3-introduces-an-unexpected-third-case
+.. _issue 357: https://bitbucket.org/ned/coveragepy/issue/357/behavior-changed-when-coveragerc-is
+.. _issue 358: https://bitbucket.org/ned/coveragepy/issue/358/all-coverage-commands-should-adjust
+
+
+Version 4.0a4 --- 25 January 2015
+---------------------------------
+
+- Plugins can now provide sys_info for debugging output.
+
+- Started plugins documentation.
+
+- Prepared to move the docs to readthedocs.org.
+
+
+Version 4.0a3 --- 20 January 2015
+---------------------------------
+
+- Reports now use file names with extensions.  Previously, a report would
+  describe a/b/c.py as "a/b/c".  Now it is shown as "a/b/c.py".  This allows
+  for better support of non-Python files, and also fixed `issue 69`_.
+
+- The XML report now reports each directory as a package again.  This was a bad
+  regression, I apologize.  This was reported in `issue 235`_, which is now
+  fixed.
+
+- A new configuration option for the XML report: ``[xml] package_depth``
+  controls which directories are identified as packages in the report.
+  Directories deeper than this depth are not reported as packages.
+  The default is that all directories are reported as packages.
+  Thanks, Lex Berezhny.
+
+- When looking for the source for a frame, check if the file exists. On
+  Windows, .pyw files are no longer recorded as .py files. Along the way, this
+  fixed `issue 290`_.
+
+- Empty files are now reported as 100% covered in the XML report, not 0%
+  covered (`issue 345`_).
+
+- Regexes in the configuration file are now compiled as soon as they are read,
+  to provide error messages earlier (`issue 349`_).
+
+.. _issue 69: https://bitbucket.org/ned/coveragepy/issue/69/coverage-html-overwrite-files-that-doesnt
+.. _issue 235: https://bitbucket.org/ned/coveragepy/issue/235/package-name-is-missing-in-xml-report
+.. _issue 290: https://bitbucket.org/ned/coveragepy/issue/290/running-programmatically-with-pyw-files
+.. _issue 345: https://bitbucket.org/ned/coveragepy/issue/345/xml-reports-line-rate-0-for-empty-files
+.. _issue 349: https://bitbucket.org/ned/coveragepy/issue/349/bad-regex-in-config-should-get-an-earlier
+
+
+Version 4.0a2 --- 14 January 2015
+---------------------------------
+
+- Officially support PyPy 2.4, and PyPy3 2.4.  Drop support for
+  CPython 3.2 and older versions of PyPy.  The code won't work on CPython 3.2.
+  It will probably still work on older versions of PyPy, but I'm not testing
+  against them.
+
+- Plugins!
+
+- The original command line switches (`-x` to run a program, etc) are no
+  longer supported.
+
+- A new option: `coverage report --skip-covered` will reduce the number of
+  files reported by skipping files with 100% coverage.  Thanks, Krystian
+  Kichewko.  This means that empty `__init__.py` files will be skipped, since
+  they are 100% covered, closing `issue 315`_.
+
+- You can now specify the ``--fail-under`` option in the ``.coveragerc`` file
+  as the ``[report] fail_under`` option.  This closes `issue 314`_.
+
+- The ``COVERAGE_OPTIONS`` environment variable is no longer supported.  It was
+  a hack for ``--timid`` before configuration files were available.
+
+- The HTML report now has filtering.  Type text into the Filter box on the
+  index page, and only modules with that text in the name will be shown.
+  Thanks, Danny Allen.
+
+- The textual report and the HTML report used to report partial branches
+  differently for no good reason.  Now the text report's "missing branches"
+  column is a "partial branches" column so that both reports show the same
+  numbers.  This closes `issue 342`_.
+
+- If you specify a ``--rcfile`` that cannot be read, you will get an error
+  message.  Fixes `issue 343`_.
+
+- The ``--debug`` switch can now be used on any command.
+
+- You can now programmatically adjust the configuration of coverage.py by
+  setting items on `Coverage.config` after construction.
+
+- A module run with ``-m`` can be used as the argument to ``--source``, fixing
+  `issue 328`_.  Thanks, Buck Evan.
+
+- The regex for matching exclusion pragmas has been fixed to allow more kinds
+  of whitespace, fixing `issue 334`_.
+
+- Made some PyPy-specific tweaks to improve speed under PyPy.  Thanks, Alex
+  Gaynor.
+
+- In some cases, with a source file missing a final newline, coverage.py would
+  count statements incorrectly.  This is now fixed, closing `issue 293`_.
+
+- The status.dat file that HTML reports use to avoid re-creating files that
+  haven't changed is now a JSON file instead of a pickle file.  This obviates
+  `issue 287`_ and `issue 237`_.
+
+.. _issue 237: https://bitbucket.org/ned/coveragepy/issue/237/htmlcov-with-corrupt-statusdat
+.. _issue 287: https://bitbucket.org/ned/coveragepy/issue/287/htmlpy-doesnt-specify-pickle-protocol
+.. _issue 293: https://bitbucket.org/ned/coveragepy/issue/293/number-of-statement-detection-wrong-if-no
+.. _issue 314: https://bitbucket.org/ned/coveragepy/issue/314/fail_under-param-not-working-in-coveragerc
+.. _issue 315: https://bitbucket.org/ned/coveragepy/issue/315/option-to-omit-empty-files-eg-__init__py
+.. _issue 328: https://bitbucket.org/ned/coveragepy/issue/328/misbehavior-in-run-source
+.. _issue 334: https://bitbucket.org/ned/coveragepy/issue/334/pragma-not-recognized-if-tab-character
+.. _issue 342: https://bitbucket.org/ned/coveragepy/issue/342/console-and-html-coverage-reports-differ
+.. _issue 343: https://bitbucket.org/ned/coveragepy/issue/343/an-explicitly-named-non-existent-config
+
+
+Version 4.0a1 --- 27 September 2014
+-----------------------------------
+
+- Python versions supported are now CPython 2.6, 2.7, 3.2, 3.3, and 3.4, and
+  PyPy 2.2.
+
+- Gevent, eventlet, and greenlet are now supported, closing `issue 149`_.
+  The ``concurrency`` setting specifies the concurrency library in use.  Huge
+  thanks to Peter Portante for initial implementation, and to Joe Jevnik for
+  the final insight that completed the work.
+
+- Options are now also read from a setup.cfg file, if any.  Sections are
+  prefixed with "coverage:", so the ``[run]`` options will be read from the
+  ``[coverage:run]`` section of setup.cfg.  Finishes `issue 304`_.
+
+- The ``report -m`` command can now show missing branches when reporting on
+  branch coverage.  Thanks, Steve Leonard. Closes `issue 230`_.
+
+- The XML report now contains a <source> element, fixing `issue 94`_.  Thanks
+  Stan Hu.
+
+- The class defined in the coverage module is now called ``Coverage`` instead
+  of ``coverage``, though the old name still works, for backward compatibility.
+
+- The ``fail-under`` value is now rounded the same as reported results,
+  preventing paradoxical results, fixing `issue 284`_.
+
+- The XML report will now create the output directory if need be, fixing
+  `issue 285`_.  Thanks, Chris Rose.
+
+- HTML reports no longer raise UnicodeDecodeError if a Python file has
+  undecodable characters, fixing `issue 303`_ and `issue 331`_.
+
+- The annotate command will now annotate all files, not just ones relative to
+  the current directory, fixing `issue 57`_.
+
+- The coverage module no longer causes deprecation warnings on Python 3.4 by
+  importing the imp module, fixing `issue 305`_.
+
+- Encoding declarations in source files are only considered if they are truly
+  comments.  Thanks, Anthony Sottile.
+
+.. _issue 57: https://bitbucket.org/ned/coveragepy/issue/57/annotate-command-fails-to-annotate-many
+.. _issue 94: https://bitbucket.org/ned/coveragepy/issue/94/coverage-xml-doesnt-produce-sources
+.. _issue 149: https://bitbucket.org/ned/coveragepy/issue/149/coverage-gevent-looks-broken
+.. _issue 230: https://bitbucket.org/ned/coveragepy/issue/230/show-line-no-for-missing-branches-in
+.. _issue 284: https://bitbucket.org/ned/coveragepy/issue/284/fail-under-should-show-more-precision
+.. _issue 285: https://bitbucket.org/ned/coveragepy/issue/285/xml-report-fails-if-output-file-directory
+.. _issue 303: https://bitbucket.org/ned/coveragepy/issue/303/unicodedecodeerror
+.. _issue 304: https://bitbucket.org/ned/coveragepy/issue/304/attempt-to-get-configuration-from-setupcfg
+.. _issue 305: https://bitbucket.org/ned/coveragepy/issue/305/pendingdeprecationwarning-the-imp-module
+.. _issue 331: https://bitbucket.org/ned/coveragepy/issue/331/failure-of-encoding-detection-on-python2
+
+
+Version 3.7.1 --- 13 December 2013
+----------------------------------
 
 - Improved the speed of HTML report generation by about 20%.
 
@@ -11,8 +415,8 @@
   so that it will actually find OS-installed static files.
 
 
-3.7 --- 6 October 2013
-----------------------
+Version 3.7 --- 6 October 2013
+------------------------------
 
 - Added the ``--debug`` switch to ``coverage run``.  It accepts a list of
   options indicating the type of internal activity to log to stderr.
@@ -22,9 +426,9 @@
 - Running code with ``coverage run -m`` now behaves more like Python does,
   setting sys.path properly, which fixes `issue 207`_ and `issue 242`_.
 
-- Coverage can now run .pyc files directly, closing `issue 264`_.
+- Coverage.py can now run .pyc files directly, closing `issue 264`_.
 
-- Coverage properly supports .pyw files, fixing `issue 261`_.
+- Coverage.py properly supports .pyw files, fixing `issue 261`_.
 
 - Omitting files within a tree specified with the ``source`` option would
   cause them to be incorrectly marked as unexecuted, as described in
@@ -40,9 +444,10 @@
 - Trying to create an XML report with no files to report on, would cause a
   ZeroDivideError, but no longer does, fixing `issue 250`_.
 
-- When running a threaded program under the Python tracer, coverage no longer
-  issues a spurious warning about the trace function changing: "Trace function
-  changed, measurement is likely wrong: None."  This fixes `issue 164`_.
+- When running a threaded program under the Python tracer, coverage.py no
+  longer issues a spurious warning about the trace function changing: "Trace
+  function changed, measurement is likely wrong: None."  This fixes `issue
+  164`_.
 
 - Static files necessary for HTML reports are found in system-installed places,
   to ease OS-level packaging of coverage.py.  Closes `issue 259`_.
@@ -99,7 +504,7 @@
 - ``debug sys`` now shows the configuration file path that was read.
 
 - If an oddly-behaved package claims that code came from an empty-string
-  filename, coverage.py no longer associates it with the directory name,
+  file name, coverage.py no longer associates it with the directory name,
   fixing `issue 221`_.
 
 .. _issue 80: https://bitbucket.org/ned/coveragepy/issue/80/is-there-a-duck-typing-way-to-know-we-cant
@@ -131,10 +536,10 @@
 - Configuration files now support substitution of environment variables, using
   syntax like ``${WORD}``.  Closes `issue 97`_.
 
-- Embarrassingly, the `[xml] output=` setting in the .coveragerc file simply
+- Embarrassingly, the ``[xml] output=`` setting in the .coveragerc file simply
   didn't work.  Now it does.
 
-- The XML report now consistently uses filenames for the filename attribute,
+- The XML report now consistently uses file names for the file name attribute,
   rather than sometimes using module names.  Fixes `issue 67`_.
   Thanks, Marcus Cobden.
 
@@ -175,8 +580,9 @@
 
 - Jython files now work with the ``--source`` option, fixing `issue 100`_.
 
-- Running coverage under a debugger is unlikely to work, but it shouldn't fail
-  with "TypeError: 'NoneType' object is not iterable".  Fixes `issue 201`_.
+- Running coverage.py under a debugger is unlikely to work, but it shouldn't
+  fail with "TypeError: 'NoneType' object is not iterable".  Fixes `issue
+  201`_.
 
 - On some Linux distributions, when installed with the OS package manager,
   coverage.py would report its own code as part of the results.  Now it won't,
@@ -186,7 +592,7 @@
 - Docstrings for the legacy singleton methods are more helpful.  Thanks Marius
   Gedminas.  Closes `issue 205`_.
 
-- The pydoc tool can now show docmentation for the class `coverage.coverage`.
+- The pydoc tool can now show documentation for the class `coverage.coverage`.
   Closes `issue 206`_.
 
 - Added a page to the docs about contributing to coverage.py, closing
@@ -198,7 +604,6 @@
 
 .. _issue 60: https://bitbucket.org/ned/coveragepy/issue/60/incorrect-path-to-orphaned-pyc-files
 .. _issue 67: https://bitbucket.org/ned/coveragepy/issue/67/xml-report-filenames-may-be-generated
-.. _issue 82: https://bitbucket.org/ned/coveragepy/issue/82/tokenerror-when-generating-html-report
 .. _issue 89: https://bitbucket.org/ned/coveragepy/issue/89/on-windows-all-packages-are-reported-in
 .. _issue 97: https://bitbucket.org/ned/coveragepy/issue/97/allow-environment-variables-to-be
 .. _issue 100: https://bitbucket.org/ned/coveragepy/issue/100/source-directive-doesnt-work-for-packages
@@ -227,8 +632,8 @@
   `issue 197`_, thanks Marius Gedminas.
 
 - When specifying a directory as the source= option, the directory itself no
-  longer needs to have a ``__init__.py`` file, though its subdirectories do, to
-  be considered as source files.
+  longer needs to have a ``__init__.py`` file, though its sub-directories do,
+  to be considered as source files.
 
 - Files encoded as UTF-8 with a BOM are now properly handled, fixing
   `issue 179`_.  Thanks, Pablo Carballo.
@@ -250,7 +655,6 @@
 
 - Testing is now done with `tox`_, thanks, Marc Abramowitz.
 
-.. _issue 82: https://bitbucket.org/ned/coveragepy/issue/82/tokenerror-when-generating-html-report
 .. _issue 147: https://bitbucket.org/ned/coveragepy/issue/147/massive-memory-usage-by-ctracer
 .. _issue 179: https://bitbucket.org/ned/coveragepy/issue/179/htmlreporter-fails-when-source-file-is
 .. _issue 183: https://bitbucket.org/ned/coveragepy/issue/183/install-fails-for-python-23
@@ -260,7 +664,6 @@
 .. _tox: http://tox.readthedocs.org/
 
 
-
 Version 3.5.2 --- 4 May 2012
 ----------------------------
 
@@ -274,7 +677,7 @@
   the page are color-coded to the source lines they affect.
 
 - Custom CSS can be applied to the HTML report by specifying a CSS file as
-  the extra_css configuration value in the [html] section.
+  the ``extra_css`` configuration value in the ``[html]`` section.
 
 - Source files with custom encodings declared in a comment at the top are now
   properly handled during reporting on Python 2.  Python 3 always handled them
@@ -284,11 +687,11 @@
   option, fixing `issue 168`_.
 
 - If a file doesn't parse properly as Python, we don't report it as an error
-  if the filename seems like maybe it wasn't meant to be Python.  This is a
+  if the file name seems like maybe it wasn't meant to be Python.  This is a
   pragmatic fix for `issue 82`_.
 
 - The ``-m`` switch on ``coverage report``, which includes missing line numbers
-  in the summary report, can now be specifed as ``show_missing`` in the
+  in the summary report, can now be specified as ``show_missing`` in the
   config file.  Closes `issue 173`_.
 
 - When running a module with ``coverage run -m <modulename>``, certain details
@@ -303,7 +706,6 @@
 - When installing into pypy, we no longer attempt (and fail) to compile
   the C tracer function, closing `issue 166`_.
 
-.. _issue 82: https://bitbucket.org/ned/coveragepy/issue/82/tokenerror-when-generating-html-report
 .. _issue 142: https://bitbucket.org/ned/coveragepy/issue/142/executing-python-file-syspath-is-replaced
 .. _issue 155: https://bitbucket.org/ned/coveragepy/issue/155/cant-use-coverage-run-m-unittest-discover
 .. _issue 157: https://bitbucket.org/ned/coveragepy/issue/157/chokes-on-source-files-with-non-utf-8
@@ -323,10 +725,10 @@
 Version 3.5.1b1 --- 28 August 2011
 ----------------------------------
 
-- When combining data files from parallel runs, you can now instruct coverage
-  about which directories are equivalent on different machines.  A ``[paths]``
-  section in the configuration file lists paths that are to be considered
-  equivalent.  Finishes `issue 17`_.
+- When combining data files from parallel runs, you can now instruct
+  coverage.py about which directories are equivalent on different machines.  A
+  ``[paths]`` section in the configuration file lists paths that are to be
+  considered equivalent.  Finishes `issue 17`_.
 
 - for-else constructs are understood better, and don't cause erroneous partial
   branch warnings.  Fixes `issue 122`_.
@@ -339,7 +741,7 @@
 
 - An explicit include directive to measure files in the Python installation
   wouldn't work because of the standard library exclusion.  Now the include
-  directive takes precendence, and the files will be measured.  Fixes
+  directive takes precedence, and the files will be measured.  Fixes
   `issue 138`_.
 
 - The HTML report now handles Unicode characters in Python source files
@@ -348,7 +750,7 @@
 
 - In order to help the core developers measure the test coverage of the
   standard library, Brandon Rhodes devised an aggressive hack to trick Python
-  into running some coverage code before anything else in the process.
+  into running some coverage.py code before anything else in the process.
   See the coverage/fullcoverage directory if you are interested.
 
 .. _issue 17: http://bitbucket.org/ned/coveragepy/issue/17/support-combining-coverage-data-from
@@ -495,7 +897,7 @@
   file patterns rather than file prefixes, closing `issue 34`_ and `issue 36`_.
 
 - BACKWARD INCOMPATIBILITY: the `omit_prefixes` argument is gone throughout
-  coverage.py, replaced with `omit`, a list of filename patterns suitable for
+  coverage.py, replaced with `omit`, a list of file name patterns suitable for
   `fnmatch`.  A parallel argument `include` controls what files are included.
 
 - The run command now has a ``--source`` switch, a list of directories or
@@ -536,7 +938,7 @@
   and parent processes.  Use ``coverage run -p`` to get two data files that can
   be combined with ``coverage combine``.  Fixes `issue 56`_.
 
-- Coverage is now runnable as a module: ``python -m coverage``.  Thanks,
+- Coverage.py is now runnable as a module: ``python -m coverage``.  Thanks,
   Brett Cannon.
 
 - When measuring code running in a virtualenv, most of the system library was
@@ -545,7 +947,7 @@
 - Doctest text files are no longer recorded in the coverage data, since they
   can't be reported anyway.  Fixes `issue 52`_ and `issue 61`_.
 
-- Jinja HTML templates compile into Python code using the HTML filename,
+- Jinja HTML templates compile into Python code using the HTML file name,
   which confused coverage.py.  Now these files are no longer traced, fixing
   `issue 82`_.
 
@@ -757,8 +1159,8 @@
   raised.  This is now fixed.
 
 - The coverage.py code itself will now not be measured by coverage.py, and no
-  coverage modules will be mentioned in the nose --with-cover plug-in.  Fixed
-  `issue 8`_.
+  coverage.py modules will be mentioned in the nose --with-cover plug-in.
+  Fixed `issue 8`_.
 
 - When running source files, coverage.py now opens them in universal newline
   mode just like Python does.  This lets it run Windows files on Mac, for
@@ -837,10 +1239,10 @@
 
 Major overhaul.
 
-- Coverage is now a package rather than a module.  Functionality has been split
-  into classes.
+- Coverage.py is now a package rather than a module.  Functionality has been
+  split into classes.
 
-- The trace function is implemented in C for speed.  Coverage runs are now
+- The trace function is implemented in C for speed.  Coverage.py runs are now
   much faster.  Thanks to David Christian for productive micro-sprints and
   other encouragement.
 
@@ -852,7 +1254,7 @@
 
 - The singleton coverage object is only created if the module-level functions
   are used.  This maintains the old interface while allowing better
-  programmatic use of Coverage.
+  programmatic use of Coverage.py.
 
 - The minimum supported Python version is 2.3.
 
@@ -950,7 +1352,7 @@
 - Add a file argument to report so that reports can be captured to a different
   destination.
 
-- coverage.py can now measure itself.
+- Coverage.py can now measure itself.
 
 - Adapted Greg Rogers' patch for using relative file names, and sorting and
   omitting files to report on.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/DebugClients/Python/coverage/doc/LICENSE.txt	Sat Oct 10 12:44:52 2015 +0200
@@ -0,0 +1,177 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
--- a/DebugClients/Python/coverage/doc/PKG-INFO	Sat Oct 10 12:06:10 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,41 +0,0 @@
-Metadata-Version: 1.0
-Name: coverage
-Version: 3.7.1
-Summary: Code coverage measurement for Python
-Home-page: http://nedbatchelder.com/code/coverage
-Author: Ned Batchelder and others
-Author-email: ned@nedbatchelder.com
-License: BSD
-Description: Coverage.py measures code coverage, typically during test execution. It uses
-        the code analysis tools and tracing hooks provided in the Python standard
-        library to determine which lines are executable, and which have been executed.
-        
-        Coverage.py runs on Pythons 2.3 through 3.3, and PyPy 1.9.
-        
-        Documentation is at `nedbatchelder.com <http://nedbatchelder.com/code/coverage>`_.  Code repository and issue
-        tracker are on `Bitbucket <http://bitbucket.org/ned/coveragepy>`_, with a
-        mirrored repo on `Github <https://github.com/nedbat/coveragepy>`_.
-        
-        New in 3.7: ``--debug``, and 12 bugs closed.
-        
-        New in 3.6: ``--fail-under``, and >20 bugs closed.
-        
-        New in 3.5: Branch coverage exclusions, keyboard shortcuts in HTML report.
-        
-        New in 3.4: Better control over source to measure, and unexecuted files
-        can be reported.
-        
-        New in 3.3: .coveragerc files.
-        
-        New in 3.2: Branch coverage!
-Keywords: code coverage testing
-Platform: UNKNOWN
-Classifier: Environment :: Console
-Classifier: Intended Audience :: Developers
-Classifier: License :: OSI Approved :: BSD License
-Classifier: Operating System :: OS Independent
-Classifier: Programming Language :: Python :: 2
-Classifier: Programming Language :: Python :: 3
-Classifier: Topic :: Software Development :: Quality Assurance
-Classifier: Topic :: Software Development :: Testing
-Classifier: Development Status :: 5 - Production/Stable
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/DebugClients/Python/coverage/doc/README.rst	Sat Oct 10 12:44:52 2015 +0200
@@ -0,0 +1,74 @@
+.. Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+.. For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
+===========
+Coverage.py
+===========
+
+Code coverage testing for Python.
+
+|  |license| |versions| |status| |docs|
+|  |ci-status| |win-ci-status| |codecov|
+|  |kit| |format| |downloads|
+
+Coverage.py measures code coverage, typically during test execution. It uses
+the code analysis tools and tracing hooks provided in the Python standard
+library to determine which lines are executable, and which have been executed.
+
+Coverage.py runs on CPython 2.6, 2.7, 3.3, 3.4 or 3.5, PyPy 2.6, and PyPy3 2.4.
+
+Documentation is on `Read the Docs <http://coverage.readthedocs.org>`_.
+Code repository and issue tracker are on `Bitbucket <http://bitbucket.org/ned/coveragepy>`_,
+with a mirrored repository on `GitHub <https://github.com/nedbat/coveragepy>`_.
+
+**New in 4.0:** ``--concurrency``, plugins for non-Python files, setup.cfg
+support, ``--skip-covered``, HTML filtering, and more than 50 issues closed.
+
+
+Quick Start
+-----------
+
+See the `quick start <http://coverage.readthedocs.org/#quick-start>`_
+section of the docs.
+
+
+License
+-------
+
+Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0.
+For details, see https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt.
+
+
+.. |ci-status| image:: https://travis-ci.org/nedbat/coveragepy.svg?branch=master
+    :target: https://travis-ci.org/nedbat/coveragepy
+    :alt: Build status
+.. |win-ci-status| image:: https://ci.appveyor.com/api/projects/status/bitbucket/ned/coveragepy?svg=true
+    :target: https://ci.appveyor.com/project/nedbat/coveragepy
+    :alt: Windows build status
+.. |docs| image:: https://readthedocs.org/projects/coverage/badge/?version=latest&style=flat
+    :target: http://coverage.readthedocs.org
+    :alt: Documentation
+.. |reqs| image:: https://requires.io/github/nedbat/coveragepy/requirements.svg?branch=master
+    :target: https://requires.io/github/nedbat/coveragepy/requirements/?branch=master
+    :alt: Requirements status
+.. |kit| image:: https://badge.fury.io/py/coverage.svg
+    :target: https://pypi.python.org/pypi/coverage
+    :alt: PyPI status
+.. |format| image:: https://img.shields.io/pypi/format/coverage.svg
+    :target: https://pypi.python.org/pypi/coverage
+    :alt: Kit format
+.. |downloads| image:: https://img.shields.io/pypi/dd/coverage.svg
+    :target: https://pypi.python.org/pypi/coverage
+    :alt: Daily PyPI downloads
+.. |versions| image:: https://img.shields.io/pypi/pyversions/coverage.svg
+    :target: https://pypi.python.org/pypi/coverage
+    :alt: Python versions supported
+.. |status| image:: https://img.shields.io/pypi/status/coverage.svg
+    :target: https://pypi.python.org/pypi/coverage
+    :alt: Package stability
+.. |license| image:: https://img.shields.io/pypi/l/coverage.svg
+    :target: https://pypi.python.org/pypi/coverage
+    :alt: License
+.. |codecov| image:: http://codecov.io/github/nedbat/coveragepy/coverage.svg?branch=master
+    :target: http://codecov.io/github/nedbat/coveragepy?branch=master
+    :alt: Coverage!
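
The Quick Start above only links to the docs. As a minimal sketch of what a programmatic run looks like, assuming the ``coverage.Coverage`` class and its ``report``/``html_report`` methods documented for 4.0 (the measured code here is a trivial stand-in):

    import coverage

    cov = coverage.Coverage()        # configuration (.coveragerc, setup.cfg) is read automatically
    cov.start()

    # Anything executed between start() and stop() is measured.
    total = sum(i * i for i in range(10))

    cov.stop()
    cov.save()                             # writes the .coverage data file
    cov.report(show_missing=True)          # text summary on stdout
    cov.html_report(directory="htmlcov")   # the HTML report implemented in html.py below
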
--- a/DebugClients/Python/coverage/doc/README.txt	Sat Oct 10 12:06:10 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,9 +0,0 @@
-Coverage.py: code coverage testing for Python
-
-Coverage.py measures code coverage, typically during test execution.  It uses
-the code analysis tools and tracing hooks provided in the Python standard
-library to determine which lines are executable, and which have been executed.
-
-For more information, see http://nedbatchelder.com/code/coverage
-
-Code repo and issue tracking are at http://bitbucket.org/ned/coveragepy
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/DebugClients/Python/coverage/env.py	Sat Oct 10 12:44:52 2015 +0200
@@ -0,0 +1,32 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
+"""Determine facts about the environment."""
+
+import os
+import sys
+
+# Operating systems.
+WINDOWS = sys.platform == "win32"
+LINUX = sys.platform == "linux2"
+
+# Python implementations.
+PYPY = '__pypy__' in sys.builtin_module_names
+
+# Python versions.
+PYVERSION = sys.version_info
+PY2 = PYVERSION < (3, 0)
+PY3 = PYVERSION >= (3, 0)
+
+# Coverage.py specifics.
+
+# Are we using the C-implemented trace function?
+C_TRACER = os.getenv('COVERAGE_TEST_TRACER', 'c') == 'c'
+
+# Are we coverage-measuring ourselves?
+METACOV = os.getenv('COVERAGE_COVERAGE', '') != ''
+
+# Are we running our test suite?
+# Even when running tests, you can use COVERAGE_TESTING=0 to disable the
+# test-specific behavior like contracts.
+TESTING = os.getenv('COVERAGE_TESTING', '') == 'True'
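
The new env module centralizes environment facts that used to be re-derived with ad-hoc ``sys.platform`` and ``sys.version_info`` checks. A small sketch of the usage pattern, assuming only the constants defined above (the helper function is illustrative, not coverage.py's own):

    from coverage import env

    def line_ending():
        # Branch on the precomputed facts instead of re-testing sys.platform
        # in every caller.
        return "\r\n" if env.WINDOWS else "\n"

    # C_TRACER is decided by an environment variable read at import time, so
    # it must be set before coverage is first imported, e.g.
    #   COVERAGE_TEST_TRACER=py  ->  env.C_TRACER is False (pure-Python tracer)
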
--- a/DebugClients/Python/coverage/execfile.py	Sat Oct 10 12:06:10 2015 +0200
+++ b/DebugClients/Python/coverage/execfile.py	Sat Oct 10 12:44:52 2015 +0200
@@ -1,41 +1,71 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
 """Execute files of Python code."""
 
-import imp, marshal, os, sys
+import marshal
+import os
+import sys
+import types
 
-from .backward import exec_code_object, open_source
-from .misc import ExceptionDuringRun, NoCode, NoSource
+from coverage.backward import BUILTINS
+from coverage.backward import PYC_MAGIC_NUMBER, imp, importlib_util_find_spec
+from coverage.misc import ExceptionDuringRun, NoCode, NoSource
+from coverage.phystokens import compile_unicode
+from coverage.python import get_python_source
 
 
-try:
-    # In Py 2.x, the builtins were in __builtin__
-    BUILTINS = sys.modules['__builtin__']
-except KeyError:
-    # In Py 3.x, they're in builtins
-    BUILTINS = sys.modules['builtins']
+class DummyLoader(object):
+    """A shim for the pep302 __loader__, emulating pkgutil.ImpLoader.
+
+    Currently only implements the .fullname attribute
+    """
+    def __init__(self, fullname, *_args):
+        self.fullname = fullname
 
 
-def rsplit1(s, sep):
-    """The same as s.rsplit(sep, 1), but works in 2.3"""
-    parts = s.split(sep)
-    return sep.join(parts[:-1]), parts[-1]
-
-
-def run_python_module(modulename, args):
-    """Run a python module, as though with ``python -m name args...``.
+if importlib_util_find_spec:
+    def find_module(modulename):
+        """Find the module named `modulename`.
 
-    `modulename` is the name of the module, possibly a dot-separated name.
-    `args` is the argument array to present as sys.argv, including the first
-    element naming the module being executed.
+        Returns the file path of the module, and the name of the enclosing
+        package.
+        """
+        try:
+            spec = importlib_util_find_spec(modulename)
+        except ImportError as err:
+            raise NoSource(str(err))
+        if not spec:
+            raise NoSource("No module named %r" % (modulename,))
+        pathname = spec.origin
+        packagename = spec.name
+        if pathname.endswith("__init__.py"):
+            mod_main = modulename + ".__main__"
+            spec = importlib_util_find_spec(mod_main)
+            if not spec:
+                raise NoSource(
+                    "No module named %s; "
+                    "%r is a package and cannot be directly executed"
+                    % (mod_main, modulename)
+                )
+            pathname = spec.origin
+            packagename = spec.name
+        packagename = packagename.rpartition(".")[0]
+        return pathname, packagename
+else:
+    def find_module(modulename):
+        """Find the module named `modulename`.
 
-    """
-    openfile = None
-    glo, loc = globals(), locals()
-    try:
+        Returns the file path of the module, and the name of the enclosing
+        package.
+        """
+        openfile = None
+        glo, loc = globals(), locals()
         try:
             # Search for the module - inside its parent package, if any - using
             # standard import mechanics.
             if '.' in modulename:
-                packagename, name = rsplit1(modulename, '.')
+                packagename, name = modulename.rsplit('.', 1)
                 package = __import__(packagename, glo, loc, ['__path__'])
                 searchpath = package.__path__
             else:
@@ -57,51 +87,92 @@
                 package = __import__(packagename, glo, loc, ['__path__'])
                 searchpath = package.__path__
                 openfile, pathname, _ = imp.find_module(name, searchpath)
-        except ImportError:
-            _, err, _ = sys.exc_info()
+        except ImportError as err:
             raise NoSource(str(err))
-    finally:
-        if openfile:
-            openfile.close()
+        finally:
+            if openfile:
+                openfile.close()
+
+        return pathname, packagename
+
 
-    # Finally, hand the file off to run_python_file for execution.
+def run_python_module(modulename, args):
+    """Run a Python module, as though with ``python -m name args...``.
+
+    `modulename` is the name of the module, possibly a dot-separated name.
+    `args` is the argument array to present as sys.argv, including the first
+    element naming the module being executed.
+
+    """
+    pathname, packagename = find_module(modulename)
+
     pathname = os.path.abspath(pathname)
     args[0] = pathname
-    run_python_file(pathname, args, package=packagename)
+    run_python_file(pathname, args, package=packagename, modulename=modulename, path0="")
 
 
-def run_python_file(filename, args, package=None):
-    """Run a python file as if it were the main program on the command line.
+def run_python_file(filename, args, package=None, modulename=None, path0=None):
+    """Run a Python file as if it were the main program on the command line.
 
     `filename` is the path to the file to execute, it need not be a .py file.
     `args` is the argument array to present as sys.argv, including the first
     element naming the file being executed.  `package` is the name of the
     enclosing package, if any.
 
+    `modulename` is the name of the module the file was run as.
+
+    `path0` is the value to put into sys.path[0].  If it's None, then this
+    function will decide on a value.
+
     """
+    if modulename is None and sys.version_info >= (3, 3):
+        modulename = '__main__'
+
     # Create a module to serve as __main__
     old_main_mod = sys.modules['__main__']
-    main_mod = imp.new_module('__main__')
+    main_mod = types.ModuleType('__main__')
     sys.modules['__main__'] = main_mod
     main_mod.__file__ = filename
     if package:
         main_mod.__package__ = package
+    if modulename:
+        main_mod.__loader__ = DummyLoader(modulename)
+
     main_mod.__builtins__ = BUILTINS
 
     # Set sys.argv properly.
     old_argv = sys.argv
     sys.argv = args
 
+    if os.path.isdir(filename):
+        # Running a directory means running the __main__.py file in that
+        # directory.
+        my_path0 = filename
+
+        for ext in [".py", ".pyc", ".pyo"]:
+            try_filename = os.path.join(filename, "__main__" + ext)
+            if os.path.exists(try_filename):
+                filename = try_filename
+                break
+        else:
+            raise NoSource("Can't find '__main__' module in '%s'" % filename)
+    else:
+        my_path0 = os.path.abspath(os.path.dirname(filename))
+
+    # Set sys.path correctly.
+    old_path0 = sys.path[0]
+    sys.path[0] = path0 if path0 is not None else my_path0
+
     try:
         # Make a code object somehow.
-        if filename.endswith(".pyc") or filename.endswith(".pyo"):
+        if filename.endswith((".pyc", ".pyo")):
             code = make_code_from_pyc(filename)
         else:
             code = make_code_from_py(filename)
 
         # Execute the code object.
         try:
-            exec_code_object(code, main_mod.__dict__)
+            exec(code, main_mod.__dict__)
         except SystemExit:
             # The user called sys.exit().  Just pass it along to the upper
             # layers, where it will be handled.
@@ -109,37 +180,34 @@
         except:
             # Something went wrong while executing the user code.
             # Get the exc_info, and pack them into an exception that we can
-            # throw up to the outer loop.  We peel two layers off the traceback
+            # throw up to the outer loop.  We peel one layer off the traceback
             # so that the coverage.py code doesn't appear in the final printed
             # traceback.
             typ, err, tb = sys.exc_info()
-            raise ExceptionDuringRun(typ, err, tb.tb_next.tb_next)
+
+            # PyPy3 weirdness.  If I don't access __context__, then somehow it
+            # is non-None when the exception is reported at the upper layer,
+            # and a nested exception is shown to the user.  This getattr fixes
+            # it somehow? https://bitbucket.org/pypy/pypy/issue/1903
+            getattr(err, '__context__', None)
+
+            raise ExceptionDuringRun(typ, err, tb.tb_next)
     finally:
-        # Restore the old __main__
+        # Restore the old __main__, argv, and path.
         sys.modules['__main__'] = old_main_mod
+        sys.argv = old_argv
+        sys.path[0] = old_path0
 
-        # Restore the old argv and path
-        sys.argv = old_argv
 
 def make_code_from_py(filename):
     """Get source from `filename` and make a code object of it."""
     # Open the source file.
     try:
-        source_file = open_source(filename)
-    except IOError:
-        raise NoSource("No file to run: %r" % filename)
+        source = get_python_source(filename)
+    except (IOError, NoSource):
+        raise NoSource("No file to run: '%s'" % filename)
 
-    try:
-        source = source_file.read()
-    finally:
-        source_file.close()
-
-    # We have the source.  `compile` still needs the last line to be clean,
-    # so make sure it is, then compile a code object from it.
-    if not source or source[-1] != '\n':
-        source += '\n'
-    code = compile(source, filename, "exec")
-
+    code = compile_unicode(source, filename, "exec")
     return code
 
 
@@ -148,13 +216,13 @@
     try:
         fpyc = open(filename, "rb")
     except IOError:
-        raise NoCode("No file to run: %r" % filename)
+        raise NoCode("No file to run: '%s'" % filename)
 
-    try:
+    with fpyc:
         # First four bytes are a version-specific magic number.  It has to
         # match or we won't run the file.
         magic = fpyc.read(4)
-        if magic != imp.get_magic():
+        if magic != PYC_MAGIC_NUMBER:
             raise NoCode("Bad magic number in .pyc file")
 
         # Skip the junk in the header that we don't need.
@@ -165,8 +233,6 @@
 
         # The rest of the file is the code object we want.
         code = marshal.load(fpyc)
-    finally:
-        fpyc.close()
 
     return code
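
run_python_file now manages sys.argv and sys.path[0] itself and also accepts a directory, in which case it looks for a ``__main__.py`` inside it. A hedged usage sketch against the signature shown above (the throwaway script exists only to make the call self-contained):

    import os
    import tempfile

    from coverage.execfile import run_python_file

    # Write a small script, then execute it roughly as "python <script> hello"
    # would, via the helper above.
    with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as f:
        f.write("import sys\nprint('argv is', sys.argv)\n")
        script = f.name

    try:
        run_python_file(script, [script, "hello"])
    finally:
        os.unlink(script)
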
 
--- a/DebugClients/Python/coverage/files.py	Sat Oct 10 12:06:10 2015 +0200
+++ b/DebugClients/Python/coverage/files.py	Sat Oct 10 12:44:52 2015 +0200
@@ -1,115 +1,123 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
 """File wrangling."""
 
-from .backward import to_string
-from .misc import CoverageException
-import fnmatch, os, os.path, re, sys
-import ntpath, posixpath
-
-class FileLocator(object):
-    """Understand how filenames work."""
-
-    def __init__(self):
-        # The absolute path to our current directory.
-        self.relative_dir = os.path.normcase(abs_file(os.curdir) + os.sep)
-        if isinstance(self.relative_dir, str):
-            self.relative_dir = self.relative_dir.decode(sys.getfilesystemencoding())
-
-        # Cache of results of calling the canonical_filename() method, to
-        # avoid duplicating work.
-        self.canonical_filename_cache = {}
-
-    def relative_filename(self, filename):
-        """Return the relative form of `filename`.
-
-        The filename will be relative to the current directory when the
-        `FileLocator` was constructed.
-
-        """
-        if isinstance(filename, str):
-            filename = filename.decode(sys.getfilesystemencoding())
-        fnorm = os.path.normcase(filename)
-        if fnorm.startswith(self.relative_dir):
-            filename = filename[len(self.relative_dir):]
-        return filename
-
-    def canonical_filename(self, filename):
-        """Return a canonical filename for `filename`.
-
-        An absolute path with no redundant components and normalized case.
+import fnmatch
+import ntpath
+import os
+import os.path
+import posixpath
+import re
+import sys
 
-        """
-        if filename not in self.canonical_filename_cache:
-            if not os.path.isabs(filename):
-                for path in [os.curdir] + sys.path:
-                    if path is None:
-                        continue
-                    f = os.path.join(path, filename)
-                    if os.path.exists(f):
-                        filename = f
-                        break
-            cf = abs_file(filename)
-            self.canonical_filename_cache[filename] = cf
-        return self.canonical_filename_cache[filename]
-
-    def get_zip_data(self, filename):
-        """Get data from `filename` if it is a zip file path.
+from coverage import env
+from coverage.backward import unicode_class
+from coverage.misc import CoverageException, join_regex
 
-        Returns the string data read from the zip file, or None if no zip file
-        could be found or `filename` isn't in it.  The data returned will be
-        an empty string if the file is empty.
 
-        """
-        import zipimport
-        markers = ['.zip'+os.sep, '.egg'+os.sep]
-        for marker in markers:
-            if marker in filename:
-                parts = filename.split(marker)
-                try:
-                    zi = zipimport.zipimporter(parts[0]+marker[:-1])
-                except zipimport.ZipImportError:
-                    continue
-                try:
-                    data = zi.get_data(parts[1])
-                except IOError:
-                    continue
-                return to_string(data)
-        return None
+RELATIVE_DIR = None
+CANONICAL_FILENAME_CACHE = {}
 
 
-if sys.platform == 'win32':
+def set_relative_directory():
+    """Set the directory that `relative_filename` will be relative to."""
+    global RELATIVE_DIR, CANONICAL_FILENAME_CACHE
+
+    # The absolute path to our current directory.
+    RELATIVE_DIR = os.path.normcase(abs_file(os.curdir) + os.sep)
+
+    # Cache of results of calling the canonical_filename() method, to
+    # avoid duplicating work.
+    CANONICAL_FILENAME_CACHE = {}
+
+def relative_directory():
+    """Return the directory that `relative_filename` is relative to."""
+    return RELATIVE_DIR
+
+def relative_filename(filename):
+    """Return the relative form of `filename`.
+
+    The file name will be relative to the directory that was current when
+    `set_relative_directory` was called.
+
+    """
+    fnorm = os.path.normcase(filename)
+    if fnorm.startswith(RELATIVE_DIR):
+        filename = filename[len(RELATIVE_DIR):]
+    return filename
+
+def canonical_filename(filename):
+    """Return a canonical file name for `filename`.
+
+    An absolute path with no redundant components and normalized case.
+
+    """
+    if filename not in CANONICAL_FILENAME_CACHE:
+        if not os.path.isabs(filename):
+            for path in [os.curdir] + sys.path:
+                if path is None:
+                    continue
+                f = path + os.sep + filename
+                if os.path.exists(f):
+                    filename = f
+                    break
+        cf = abs_file(filename)
+        CANONICAL_FILENAME_CACHE[filename] = cf
+    return CANONICAL_FILENAME_CACHE[filename]
+
+
+def flat_rootname(filename):
+    """A base for a flat file name to correspond to this file.
+
+    Useful for writing files about the code where you want all the files in
+    the same directory, but need to differentiate same-named files from
+    different directories.
+
+    For example, the file a/b/c.py will return 'a_b_c_py'
+
+    """
+    name = ntpath.splitdrive(filename)[1]
+    return re.sub(r"[\\/.:]", "_", name)
+
+
+if env.WINDOWS:
+
+    _ACTUAL_PATH_CACHE = {}
+    _ACTUAL_PATH_LIST_CACHE = {}
 
     def actual_path(path):
         """Get the actual path of `path`, including the correct case."""
-        if path in actual_path.cache:
-            return actual_path.cache[path]
+        if env.PY2 and isinstance(path, unicode_class):
+            path = path.encode(sys.getfilesystemencoding())
+        if path in _ACTUAL_PATH_CACHE:
+            return _ACTUAL_PATH_CACHE[path]
 
         head, tail = os.path.split(path)
         if not tail:
-            actpath = head
+            # This means head is the drive spec: normalize it.
+            actpath = head.upper()
         elif not head:
             actpath = tail
         else:
             head = actual_path(head)
-            if head in actual_path.list_cache:
-                files = actual_path.list_cache[head]
+            if head in _ACTUAL_PATH_LIST_CACHE:
+                files = _ACTUAL_PATH_LIST_CACHE[head]
             else:
                 try:
                     files = os.listdir(head)
                 except OSError:
                     files = []
-                actual_path.list_cache[head] = files
+                _ACTUAL_PATH_LIST_CACHE[head] = files
             normtail = os.path.normcase(tail)
             for f in files:
                 if os.path.normcase(f) == normtail:
                     tail = f
                     break
-            actpath = os.path.join(head, tail)
-        actual_path.cache[path] = actpath
+            actpath = head.strip(os.sep) + os.sep + tail
+        _ACTUAL_PATH_CACHE[path] = actpath
         return actpath
 
-    actual_path.cache = {}
-    actual_path.list_cache = {}
-
 else:
     def actual_path(filename):
         """The actual path for non-Windows platforms."""
@@ -141,7 +149,7 @@
     """
     prepped = []
     for p in patterns or []:
-        if p.startswith("*") or p.startswith("?"):
+        if p.startswith(("*", "?")):
             prepped.append(p)
         else:
             prepped.append(abs_file(p))
@@ -151,7 +159,7 @@
 class TreeMatcher(object):
     """A matcher for files in a tree."""
     def __init__(self, directories):
-        self.dirs = directories[:]
+        self.dirs = list(directories)
 
     def __repr__(self):
         return "<TreeMatcher %r>" % self.dirs
@@ -160,10 +168,6 @@
         """A list of strings for displaying when dumping state."""
         return self.dirs
 
-    def add(self, directory):
-        """Add another directory to the list we match for."""
-        self.dirs.append(directory)
-
     def match(self, fpath):
         """Does `fpath` indicate a file in one of our trees?"""
         for d in self.dirs:
@@ -177,10 +181,49 @@
         return False
 
 
+class ModuleMatcher(object):
+    """A matcher for modules in a tree."""
+    def __init__(self, module_names):
+        self.modules = list(module_names)
+
+    def __repr__(self):
+        return "<ModuleMatcher %r>" % (self.modules)
+
+    def info(self):
+        """A list of strings for displaying when dumping state."""
+        return self.modules
+
+    def match(self, module_name):
+        """Does `module_name` indicate a module in one of our packages?"""
+        if not module_name:
+            return False
+
+        for m in self.modules:
+            if module_name.startswith(m):
+                if module_name == m:
+                    return True
+                if module_name[len(m)] == '.':
+                    # This is a module in the package
+                    return True
+
+        return False
+
+
 class FnmatchMatcher(object):
-    """A matcher for files by filename pattern."""
+    """A matcher for files by file name pattern."""
     def __init__(self, pats):
         self.pats = pats[:]
+        # fnmatch is platform-specific. On Windows, it does the Windows thing
+        # of treating / and \ as equivalent. But on other platforms, we need to
+        # take care of that ourselves.
+        fnpats = (fnmatch.translate(p) for p in pats)
+        fnpats = (p.replace(r"\/", r"[\\/]") for p in fnpats)
+        if env.WINDOWS:
+            # Windows is also case-insensitive.  BTW: the regex docs say that
+            # flags like (?i) have to be at the beginning, but fnmatch puts
+            # them at the end, and having two there seems to work fine.
+            fnpats = (p + "(?i)" for p in fnpats)
+        self.re = re.compile(join_regex(fnpats))
 
     def __repr__(self):
         return "<FnmatchMatcher %r>" % self.pats
@@ -190,11 +233,8 @@
         return self.pats
 
     def match(self, fpath):
-        """Does `fpath` match one of our filename patterns?"""
-        for pat in self.pats:
-            if fnmatch.fnmatch(fpath, pat):
-                return True
-        return False
+        """Does `fpath` match one of our file name patterns?"""
+        return self.re.match(fpath) is not None
 
 
 def sep(s):
@@ -217,12 +257,9 @@
     A `PathAliases` object tracks a list of pattern/result pairs, and can
     map a path through those aliases to produce a unified path.
 
-    `locator` is a FileLocator that is used to canonicalize the results.
-
     """
-    def __init__(self, locator=None):
+    def __init__(self):
         self.aliases = []
-        self.locator = locator
 
     def add(self, pattern, result):
         """Add the `pattern`/`result` pair to the list of aliases.
@@ -249,11 +286,10 @@
             pattern = abs_file(pattern)
         pattern += pattern_sep
 
-        # Make a regex from the pattern.  fnmatch always adds a \Z or $ to
+        # Make a regex from the pattern.  fnmatch always adds a \Z to
         # match the whole string, which we don't want.
         regex_pat = fnmatch.translate(pattern).replace(r'\Z(', '(')
-        if regex_pat.endswith("$"):
-            regex_pat = regex_pat[:-1]
+
         # We want */a/b.py to match on Windows too, so change slash to match
         # either separator.
         regex_pat = regex_pat.replace(r"\/", r"[\\/]")
@@ -276,6 +312,10 @@
         The separator style in the result is made to match that of the result
         in the alias.
 
+        Returns the mapped path.  If a mapping has happened, this is a
+        canonical path.  If no mapping has happened, it is the original value
+        of `path` unchanged.
+
         """
         for regex, result, pattern_sep, result_sep in self.aliases:
             m = regex.match(path)
@@ -283,8 +323,7 @@
                 new = path.replace(m.group(0), result)
                 if pattern_sep != result_sep:
                     new = new.replace(pattern_sep, result_sep)
-                if self.locator:
-                    new = self.locator.canonical_filename(new)
+                new = canonical_filename(new)
                 return new
         return path
 
@@ -295,7 +334,7 @@
     To be importable, the files have to be in a directory with a __init__.py,
     except for `dirname` itself, which isn't required to have one.  The
     assumption is that `dirname` was specified directly, so the user knows
-    best, but subdirectories are checked for a __init__.py to be sure we only
+    best, but sub-directories are checked for a __init__.py to be sure we only
     find the importable files.
 
     """
@@ -310,7 +349,7 @@
             # files: Must end with .py or .pyw, and must not have certain funny
             # characters that probably mean they are editor junk.
             if re.match(r"^[^.#~!$@%^&*()+=,]+\.pyw?$", filename):
-                yield os.path.join(dirpath, filename)
+                yield dirpath + os.sep + filename
 
 #
 # eflag: FileType = Python2
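
The FnmatchMatcher rewrite above compiles all of its patterns into a single regular expression, letting ``/`` in a pattern match either path separator and adding case-insensitivity on Windows. A standalone sketch of that technique using only the standard library (it mirrors the idea rather than coverage.py's exact code; on newer Pythons ``fnmatch.translate`` no longer escapes ``/``, so the separator replacement is a no-op there):

    import fnmatch
    import re

    def compile_fnmatch_patterns(patterns, case_insensitive=False):
        # Translate each fnmatch pattern to a regex, let "/" also match "\",
        # and join everything into one alternation.
        parts = []
        for pat in patterns:
            rx = fnmatch.translate(pat)
            rx = rx.replace(r"\/", r"[\\/]")
            parts.append("(" + rx + ")")
        joined = "|".join(parts)
        if case_insensitive:
            joined = "(?i)" + joined
        return re.compile(joined)

    matcher = compile_fnmatch_patterns(["*/tests/*", "*.tmp"])
    print(bool(matcher.match("/home/me/proj/tests/test_files.py")))   # True
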
--- a/DebugClients/Python/coverage/html.py	Sat Oct 10 12:06:10 2015 +0200
+++ b/DebugClients/Python/coverage/html.py	Sat Oct 10 12:44:52 2015 +0200
@@ -1,14 +1,22 @@
-"""HTML reporting for Coverage."""
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
 
-import os, re, shutil, sys
+"""HTML reporting for coverage.py."""
 
-from . import coverage
-from .backward import pickle
-from .misc import CoverageException, Hasher
-from .phystokens import source_token_lines, source_encoding
-from .report import Reporter
-from .results import Numbers
-from .templite import Templite
+import datetime
+import json
+import os
+import re
+import shutil
+
+import coverage
+from coverage import env
+from coverage.backward import iitems
+from coverage.files import flat_rootname
+from coverage.misc import CoverageException, Hasher
+from coverage.report import Reporter
+from coverage.results import Numbers
+from coverage.templite import Templite
 
 
 # Static files are looked for in a list of places.
@@ -20,6 +28,7 @@
     os.path.join(os.path.dirname(__file__), "htmlfiles"),
 ]
 
+
 def data_filename(fname, pkgdir=""):
     """Return the path to a data file of ours.
 
@@ -27,69 +36,80 @@
     is returned.
 
     Each directory in `STATIC_PATH` is searched as-is, and also, if `pkgdir`
-    is provided, at that subdirectory.
+    is provided, at that sub-directory.
 
     """
+    tried = []
     for static_dir in STATIC_PATH:
         static_filename = os.path.join(static_dir, fname)
         if os.path.exists(static_filename):
             return static_filename
+        else:
+            tried.append(static_filename)
         if pkgdir:
             static_filename = os.path.join(static_dir, pkgdir, fname)
             if os.path.exists(static_filename):
                 return static_filename
-    raise CoverageException("Couldn't find static file %r" % fname)
+            else:
+                tried.append(static_filename)
+    raise CoverageException(
+        "Couldn't find static file %r from %r, tried: %r" % (fname, os.getcwd(), tried)
+    )
 
 
 def data(fname):
     """Return the contents of a data file of ours."""
-    data_file = open(data_filename(fname))
-    try:
+    with open(data_filename(fname)) as data_file:
         return data_file.read()
-    finally:
-        data_file.close()
 
 
 class HtmlReporter(Reporter):
     """HTML reporting."""
 
-    # These files will be copied from the htmlfiles dir to the output dir.
+    # These files will be copied from the htmlfiles directory to the output
+    # directory.
     STATIC_FILES = [
-            ("style.css", ""),
-            ("jquery.min.js", "jquery"),
-            ("jquery.hotkeys.js", "jquery-hotkeys"),
-            ("jquery.isonscreen.js", "jquery-isonscreen"),
-            ("jquery.tablesorter.min.js", "jquery-tablesorter"),
-            ("coverage_html.js", ""),
-            ("keybd_closed.png", ""),
-            ("keybd_open.png", ""),
-            ]
+        ("style.css", ""),
+        ("jquery.min.js", "jquery"),
+        ("jquery.debounce.min.js", "jquery-debounce"),
+        ("jquery.hotkeys.js", "jquery-hotkeys"),
+        ("jquery.isonscreen.js", "jquery-isonscreen"),
+        ("jquery.tablesorter.min.js", "jquery-tablesorter"),
+        ("coverage_html.js", ""),
+        ("keybd_closed.png", ""),
+        ("keybd_open.png", ""),
+    ]
 
     def __init__(self, cov, config):
         super(HtmlReporter, self).__init__(cov, config)
         self.directory = None
+        title = self.config.html_title
+        if env.PY2:
+            title = title.decode("utf8")
         self.template_globals = {
             'escape': escape,
-            'title': self.config.html_title,
+            'pair': pair,
+            'title': title,
             '__url__': coverage.__url__,
             '__version__': coverage.__version__,
-            }
+        }
         self.source_tmpl = Templite(
             data("pyfile.html"), self.template_globals
-            )
+        )
 
         self.coverage = cov
 
         self.files = []
-        self.arcs = self.coverage.data.has_arcs()
+        self.has_arcs = self.coverage.data.has_arcs()
         self.status = HtmlStatus()
         self.extra_css = None
         self.totals = Numbers()
+        self.time_stamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M')
 
     def report(self, morfs):
         """Generate an HTML report for `morfs`.
 
-        `morfs` is a list of modules or filenames.
+        `morfs` is a list of modules or file names.
 
         """
         assert self.config.html_dir, "must give a directory for html reporting"
@@ -100,7 +120,7 @@
         # Check that this run used the same settings as the last run.
         m = Hasher()
         m.update(self.config)
-        these_settings = m.digest()
+        these_settings = m.hexdigest()
         if self.status.settings_hash() != these_settings:
             self.status.reset()
             self.status.set_settings_hash(these_settings)
@@ -119,8 +139,7 @@
         self.index_file()
 
         self.make_local_static_report_files()
-
-        return self.totals.pc_covered
+        return self.totals.n_statements and self.totals.pc_covered
 
     def make_local_static_report_files(self):
         """Make local instances of static files for HTML report."""
@@ -129,62 +148,46 @@
             shutil.copyfile(
                 data_filename(static, pkgdir),
                 os.path.join(self.directory, static)
-                )
+            )
 
         # The user may have extra CSS they want copied.
         if self.extra_css:
             shutil.copyfile(
                 self.config.extra_css,
                 os.path.join(self.directory, self.extra_css)
-                )
+            )
 
     def write_html(self, fname, html):
         """Write `html` to `fname`, properly encoded."""
-        fout = open(fname, "wb")
-        try:
+        with open(fname, "wb") as fout:
             fout.write(html.encode('ascii', 'xmlcharrefreplace'))
-        finally:
-            fout.close()
 
-    def file_hash(self, source, cu):
+    def file_hash(self, source, fr):
         """Compute a hash that changes if the file needs to be re-reported."""
         m = Hasher()
         m.update(source)
-        self.coverage.data.add_to_hash(cu.filename, m)
-        return m.digest()
+        self.coverage.data.add_to_hash(fr.filename, m)
+        return m.hexdigest()
 
-    def html_file(self, cu, analysis):
+    def html_file(self, fr, analysis):
         """Generate an HTML file for one source file."""
-        source_file = cu.source_file()
-        try:
-            source = source_file.read()
-        finally:
-            source_file.close()
+        source = fr.source()
 
         # Find out if the file on disk is already correct.
-        flat_rootname = cu.flat_rootname()
-        this_hash = self.file_hash(source, cu)
-        that_hash = self.status.file_hash(flat_rootname)
+        rootname = flat_rootname(fr.relative_filename())
+        this_hash = self.file_hash(source.encode('utf-8'), fr)
+        that_hash = self.status.file_hash(rootname)
         if this_hash == that_hash:
             # Nothing has changed to require the file to be reported again.
-            self.files.append(self.status.index_info(flat_rootname))
+            self.files.append(self.status.index_info(rootname))
             return
 
-        self.status.set_file_hash(flat_rootname, this_hash)
-
-        # If need be, determine the encoding of the source file. We use it
-        # later to properly write the HTML.
-        if sys.version_info < (3, 0):
-            encoding = source_encoding(source)
-            # Some UTF8 files have the dreaded UTF8 BOM. If so, junk it.
-            if encoding.startswith("utf-8") and source[:3] == "\xef\xbb\xbf":
-                source = source[3:]
-                encoding = "utf-8"
+        self.status.set_file_hash(rootname, this_hash)
 
         # Get the numbers for this file.
         nums = analysis.numbers
 
-        if self.arcs:
+        if self.has_arcs:
             missing_branch_arcs = analysis.missing_branch_arcs()
 
         # These classes determine which lines are highlighted by default.
@@ -195,8 +198,7 @@
 
         lines = []
 
-        for lineno, line in enumerate(source_token_lines(source)):
-            lineno += 1     # 1-based line numbers.
+        for lineno, line in enumerate(fr.source_token_lines(), start=1):
             # Figure out how to mark this line.
             line_class = []
             annotate_html = ""
@@ -207,23 +209,34 @@
                 line_class.append(c_exc)
             elif lineno in analysis.missing:
                 line_class.append(c_mis)
-            elif self.arcs and lineno in missing_branch_arcs:
+            elif self.has_arcs and lineno in missing_branch_arcs:
                 line_class.append(c_par)
-                annlines = []
+                shorts = []
+                longs = []
                 for b in missing_branch_arcs[lineno]:
                     if b < 0:
-                        annlines.append("exit")
+                        shorts.append("exit")
+                        longs.append("the function exit")
                     else:
-                        annlines.append(str(b))
-                annotate_html = "&nbsp;&nbsp; ".join(annlines)
-                if len(annlines) > 1:
-                    annotate_title = "no jumps to these line numbers"
-                elif len(annlines) == 1:
-                    annotate_title = "no jump to this line number"
+                        shorts.append(b)
+                        longs.append("line %d" % b)
+                # 202F is NARROW NO-BREAK SPACE.
+                # 219B is RIGHTWARDS ARROW WITH STROKE.
+                short_fmt = "%s&#x202F;&#x219B;&#x202F;%s"
+                annotate_html = ",&nbsp;&nbsp; ".join(short_fmt % (lineno, d) for d in shorts)
+                annotate_html += " [?]"
+
+                annotate_title = "Line %d was executed, but never jumped to " % lineno
+                if len(longs) == 1:
+                    annotate_title += longs[0]
+                elif len(longs) == 2:
+                    annotate_title += longs[0] + " or " + longs[1]
+                else:
+                    annotate_title += ", ".join(longs[:-1]) + ", or " + longs[-1]
             elif lineno in analysis.statements:
                 line_class.append(c_run)
 
-            # Build the HTML for the line
+            # Build the HTML for the line.
             html = []
             for tok_type, tok_text in line:
                 if tok_type == "ws":
@@ -231,8 +244,8 @@
                 else:
                     tok_html = escape(tok_text) or '&nbsp;'
                     html.append(
-                        "<span class='%s'>%s</span>" % (tok_type, tok_html)
-                        )
+                        '<span class="%s">%s</span>' % (tok_type, tok_html)
+                    )
 
             lines.append({
                 'html': ''.join(html),
@@ -243,16 +256,15 @@
             })
 
         # Write the HTML page for this file.
-        html = spaceless(self.source_tmpl.render({
+        template_values = {
             'c_exc': c_exc, 'c_mis': c_mis, 'c_par': c_par, 'c_run': c_run,
-            'arcs': self.arcs, 'extra_css': self.extra_css,
-            'cu': cu, 'nums': nums, 'lines': lines,
-        }))
+            'has_arcs': self.has_arcs, 'extra_css': self.extra_css,
+            'fr': fr, 'nums': nums, 'lines': lines,
+            'time_stamp': self.time_stamp,
+        }
+        html = spaceless(self.source_tmpl.render(template_values))
 
-        if sys.version_info < (3, 0):
-            html = html.decode(encoding)
-
-        html_filename = flat_rootname + ".html"
+        html_filename = rootname + ".html"
         html_path = os.path.join(self.directory, html_filename)
         self.write_html(html_path, html)
 
@@ -260,32 +272,26 @@
         index_info = {
             'nums': nums,
             'html_filename': html_filename,
-            'name': cu.name,
-            }
+            'relative_filename': fr.relative_filename(),
+        }
         self.files.append(index_info)
-        self.status.set_index_info(flat_rootname, index_info)
+        self.status.set_index_info(rootname, index_info)
 
     def index_file(self):
         """Write the index.html file for this report."""
-        index_tmpl = Templite(
-            data("index.html"), self.template_globals
-            )
+        index_tmpl = Templite(data("index.html"), self.template_globals)
 
-        self.totals = sum([f['nums'] for f in self.files])
+        self.totals = sum(f['nums'] for f in self.files)
 
         html = index_tmpl.render({
-            'arcs': self.arcs,
+            'has_arcs': self.has_arcs,
             'extra_css': self.extra_css,
             'files': self.files,
             'totals': self.totals,
+            'time_stamp': self.time_stamp,
         })
 
-        if sys.version_info < (3, 0):
-            html = html.decode("utf-8")
-        self.write_html(
-            os.path.join(self.directory, "index.html"),
-            html
-            )
+        self.write_html(os.path.join(self.directory, "index.html"), html)
 
         # Write the latest hashes for next time.
         self.status.write(self.directory)
@@ -294,9 +300,37 @@
 class HtmlStatus(object):
     """The status information we keep to support incremental reporting."""
 
-    STATUS_FILE = "status.dat"
+    STATUS_FILE = "status.json"
     STATUS_FORMAT = 1
 
+    #           pylint: disable=wrong-spelling-in-comment,useless-suppression
+    #  The data looks like:
+    #
+    #  {
+    #      'format': 1,
+    #      'settings': '540ee119c15d52a68a53fe6f0897346d',
+    #      'version': '4.0a1',
+    #      'files': {
+    #          'cogapp___init__': {
+    #              'hash': 'e45581a5b48f879f301c0f30bf77a50c',
+    #              'index': {
+    #                  'html_filename': 'cogapp___init__.html',
+    #                  'name': 'cogapp/__init__',
+    #                  'nums': <coverage.results.Numbers object at 0x10ab7ed0>,
+    #              }
+    #          },
+    #          ...
+    #          'cogapp_whiteutils': {
+    #              'hash': '8504bb427fc488c4176809ded0277d51',
+    #              'index': {
+    #                  'html_filename': 'cogapp_whiteutils.html',
+    #                  'name': 'cogapp/whiteutils',
+    #                  'nums': <coverage.results.Numbers object at 0x10ab7d90>,
+    #              }
+    #          },
+    #      },
+    #  }
+
     def __init__(self):
         self.reset()
 
@@ -310,11 +344,8 @@
         usable = False
         try:
             status_file = os.path.join(directory, self.STATUS_FILE)
-            fstatus = open(status_file, "rb")
-            try:
-                status = pickle.load(fstatus)
-            finally:
-                fstatus.close()
+            with open(status_file, "r") as fstatus:
+                status = json.load(fstatus)
         except (IOError, ValueError):
             usable = False
         else:
@@ -325,7 +356,10 @@
                 usable = False
 
         if usable:
-            self.files = status['files']
+            self.files = {}
+            for filename, fileinfo in iitems(status['files']):
+                fileinfo['index']['nums'] = Numbers(*fileinfo['index']['nums'])
+                self.files[filename] = fileinfo
             self.settings = status['settings']
         else:
             self.reset()
@@ -333,17 +367,26 @@
     def write(self, directory):
         """Write the current status to `directory`."""
         status_file = os.path.join(directory, self.STATUS_FILE)
+        files = {}
+        for filename, fileinfo in iitems(self.files):
+            fileinfo['index']['nums'] = fileinfo['index']['nums'].init_args()
+            files[filename] = fileinfo
+
         status = {
             'format': self.STATUS_FORMAT,
             'version': coverage.__version__,
             'settings': self.settings,
-            'files': self.files,
-            }
-        fout = open(status_file, "wb")
-        try:
-            pickle.dump(status, fout)
-        finally:
-            fout.close()
+            'files': files,
+        }
+        with open(status_file, "w") as fout:
+            json.dump(status, fout)
+
+        # Older versions of ShiningPanda look for the old name, status.dat.
+        # Accommodate them if we are running under Jenkins.
+        # https://issues.jenkins-ci.org/browse/JENKINS-28428
+        if "JENKINS_URL" in os.environ:
+            with open(os.path.join(directory, "status.dat"), "w") as dat:
+                dat.write("https://issues.jenkins-ci.org/browse/JENKINS-28428\n")
 
     def settings_hash(self):
         """Get the hash of the coverage.py settings."""
@@ -374,16 +417,18 @@
 
 def escape(t):
     """HTML-escape the text in `t`."""
-    return (t
-            # Convert HTML special chars into HTML entities.
-            .replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;")
-            .replace("'", "&#39;").replace('"', "&quot;")
-            # Convert runs of spaces: "......" -> "&nbsp;.&nbsp;.&nbsp;."
-            .replace("  ", "&nbsp; ")
-            # To deal with odd-length runs, convert the final pair of spaces
-            # so that "....." -> "&nbsp;.&nbsp;&nbsp;."
-            .replace("  ", "&nbsp; ")
-        )
+    return (
+        t
+        # Convert HTML special chars into HTML entities.
+        .replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;")
+        .replace("'", "&#39;").replace('"', "&quot;")
+        # Convert runs of spaces: "......" -> "&nbsp;.&nbsp;.&nbsp;."
+        .replace("  ", "&nbsp; ")
+        # To deal with odd-length runs, convert the final pair of spaces
+        # so that "....." -> "&nbsp;.&nbsp;&nbsp;."
+        .replace("  ", "&nbsp; ")
+    )
+
 
 def spaceless(html):
     """Squeeze out some annoying extra space from an HTML string.
@@ -395,5 +440,10 @@
     html = re.sub(r">\s+<p ", ">\n<p ", html)
     return html
 
+
+def pair(ratio):
+    """Format a pair of numbers so JavaScript can read them in an attribute."""
+    return "%s %s" % ratio
+
 #
 # eflag: FileType = Python2
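
The html.py hunks above replace the pickled status.dat with a JSON status.json. Because coverage.results.Numbers objects are not JSON-serializable, write() flattens each one to its constructor arguments via init_args() and read() rebuilds it with Numbers(*args). A minimal, self-contained sketch of that round-trip, using a toy stand-in class and made-up module names rather than coverage.py's real ones:

    import json

    class NumbersLike(object):
        """Toy stand-in for coverage.results.Numbers (illustration only)."""
        def __init__(self, n_statements=0, n_missing=0):
            self.n_statements = n_statements
            self.n_missing = n_missing

        def init_args(self):
            # The list of constructor arguments, in order -- JSON-friendly.
            return [self.n_statements, self.n_missing]

    # Writing: flatten each Numbers-like object before json.dump().
    files = {'mod_py': {'hash': 'abc123', 'index': {'nums': NumbersLike(10, 2)}}}
    status = {'format': 1, 'files': {
        name: {'hash': info['hash'],
               'index': {'nums': info['index']['nums'].init_args()}}
        for name, info in files.items()}}
    text = json.dumps(status)

    # Reading: rebuild the objects from their argument lists.
    loaded = json.loads(text)
    for name, info in loaded['files'].items():
        info['index']['nums'] = NumbersLike(*info['index']['nums'])
    assert loaded['files']['mod_py']['index']['nums'].n_missing == 2

Keeping the serialized form as a plain argument list is what allows the status file to be read back with json.load() alone, with no custom decoder.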
--- a/DebugClients/Python/coverage/htmlfiles/coverage_html.js	Sat Oct 10 12:06:10 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,376 +0,0 @@
-// Coverage.py HTML report browser code.
-/*jslint browser: true, sloppy: true, vars: true, plusplus: true, maxerr: 50, indent: 4 */
-/*global coverage: true, document, window, $ */
-
-coverage = {};
-
-// Find all the elements with shortkey_* class, and use them to assign a shortcut key.
-coverage.assign_shortkeys = function () {
-    $("*[class*='shortkey_']").each(function (i, e) {
-        $.each($(e).attr("class").split(" "), function (i, c) {
-            if (/^shortkey_/.test(c)) {
-                $(document).bind('keydown', c.substr(9), function () {
-                    $(e).click();
-                });
-            }
-        });
-    });
-};
-
-// Create the events for the help panel.
-coverage.wire_up_help_panel = function () {
-    $("#keyboard_icon").click(function () {
-        // Show the help panel, and position it so the keyboard icon in the
-        // panel is in the same place as the keyboard icon in the header.
-        $(".help_panel").show();
-        var koff = $("#keyboard_icon").offset();
-        var poff = $("#panel_icon").position();
-        $(".help_panel").offset({
-            top: koff.top-poff.top,
-            left: koff.left-poff.left
-        });
-    });
-    $("#panel_icon").click(function () {
-        $(".help_panel").hide();
-    });
-};
-
-// Loaded on index.html
-coverage.index_ready = function ($) {
-    // Look for a cookie containing previous sort settings:
-    var sort_list = [];
-    var cookie_name = "COVERAGE_INDEX_SORT";
-    var i;
-
-    // This almost makes it worth installing the jQuery cookie plugin:
-    if (document.cookie.indexOf(cookie_name) > -1) {
-        var cookies = document.cookie.split(";");
-        for (i = 0; i < cookies.length; i++) {
-            var parts = cookies[i].split("=");
-
-            if ($.trim(parts[0]) === cookie_name && parts[1]) {
-                sort_list = eval("[[" + parts[1] + "]]");
-                break;
-            }
-        }
-    }
-
-    // Create a new widget which exists only to save and restore
-    // the sort order:
-    $.tablesorter.addWidget({
-        id: "persistentSort",
-
-        // Format is called by the widget before displaying:
-        format: function (table) {
-            if (table.config.sortList.length === 0 && sort_list.length > 0) {
-                // This table hasn't been sorted before - we'll use
-                // our stored settings:
-                $(table).trigger('sorton', [sort_list]);
-            }
-            else {
-                // This is not the first load - something has
-                // already defined sorting so we'll just update
-                // our stored value to match:
-                sort_list = table.config.sortList;
-            }
-        }
-    });
-
-    // Configure our tablesorter to handle the variable number of
-    // columns produced depending on report options:
-    var headers = [];
-    var col_count = $("table.index > thead > tr > th").length;
-
-    headers[0] = { sorter: 'text' };
-    for (i = 1; i < col_count-1; i++) {
-        headers[i] = { sorter: 'digit' };
-    }
-    headers[col_count-1] = { sorter: 'percent' };
-
-    // Enable the table sorter:
-    $("table.index").tablesorter({
-        widgets: ['persistentSort'],
-        headers: headers
-    });
-
-    coverage.assign_shortkeys();
-    coverage.wire_up_help_panel();
-
-    // Watch for page unload events so we can save the final sort settings:
-    $(window).unload(function () {
-        document.cookie = cookie_name + "=" + sort_list.toString() + "; path=/";
-    });
-};
-
-// -- pyfile stuff --
-
-coverage.pyfile_ready = function ($) {
-    // If we're directed to a particular line number, highlight the line.
-    var frag = location.hash;
-    if (frag.length > 2 && frag[1] === 'n') {
-        $(frag).addClass('highlight');
-        coverage.set_sel(parseInt(frag.substr(2), 10));
-    }
-    else {
-        coverage.set_sel(0);
-    }
-
-    $(document)
-        .bind('keydown', 'j', coverage.to_next_chunk_nicely)
-        .bind('keydown', 'k', coverage.to_prev_chunk_nicely)
-        .bind('keydown', '0', coverage.to_top)
-        .bind('keydown', '1', coverage.to_first_chunk)
-        ;
-
-    $(".button_toggle_run").click(function (evt) {coverage.toggle_lines(evt.target, "run");});
-    $(".button_toggle_exc").click(function (evt) {coverage.toggle_lines(evt.target, "exc");});
-    $(".button_toggle_mis").click(function (evt) {coverage.toggle_lines(evt.target, "mis");});
-    $(".button_toggle_par").click(function (evt) {coverage.toggle_lines(evt.target, "par");});
-
-    coverage.assign_shortkeys();
-    coverage.wire_up_help_panel();
-};
-
-coverage.toggle_lines = function (btn, cls) {
-    btn = $(btn);
-    var hide = "hide_"+cls;
-    if (btn.hasClass(hide)) {
-        $("#source ."+cls).removeClass(hide);
-        btn.removeClass(hide);
-    }
-    else {
-        $("#source ."+cls).addClass(hide);
-        btn.addClass(hide);
-    }
-};
-
-// Return the nth line div.
-coverage.line_elt = function (n) {
-    return $("#t" + n);
-};
-
-// Return the nth line number div.
-coverage.num_elt = function (n) {
-    return $("#n" + n);
-};
-
-// Return the container of all the code.
-coverage.code_container = function () {
-    return $(".linenos");
-};
-
-// Set the selection.  b and e are line numbers.
-coverage.set_sel = function (b, e) {
-    // The first line selected.
-    coverage.sel_begin = b;
-    // The next line not selected.
-    coverage.sel_end = (e === undefined) ? b+1 : e;
-};
-
-coverage.to_top = function () {
-    coverage.set_sel(0, 1);
-    coverage.scroll_window(0);
-};
-
-coverage.to_first_chunk = function () {
-    coverage.set_sel(0, 1);
-    coverage.to_next_chunk();
-};
-
-coverage.is_transparent = function (color) {
-    // Different browsers return different colors for "none".
-    return color === "transparent" || color === "rgba(0, 0, 0, 0)";
-};
-
-coverage.to_next_chunk = function () {
-    var c = coverage;
-
-    // Find the start of the next colored chunk.
-    var probe = c.sel_end;
-    while (true) {
-        var probe_line = c.line_elt(probe);
-        if (probe_line.length === 0) {
-            return;
-        }
-        var color = probe_line.css("background-color");
-        if (!c.is_transparent(color)) {
-            break;
-        }
-        probe++;
-    }
-
-    // There's a next chunk, `probe` points to it.
-    var begin = probe;
-
-    // Find the end of this chunk.
-    var next_color = color;
-    while (next_color === color) {
-        probe++;
-        probe_line = c.line_elt(probe);
-        next_color = probe_line.css("background-color");
-    }
-    c.set_sel(begin, probe);
-    c.show_selection();
-};
-
-coverage.to_prev_chunk = function () {
-    var c = coverage;
-
-    // Find the end of the prev colored chunk.
-    var probe = c.sel_begin-1;
-    var probe_line = c.line_elt(probe);
-    if (probe_line.length === 0) {
-        return;
-    }
-    var color = probe_line.css("background-color");
-    while (probe > 0 && c.is_transparent(color)) {
-        probe--;
-        probe_line = c.line_elt(probe);
-        if (probe_line.length === 0) {
-            return;
-        }
-        color = probe_line.css("background-color");
-    }
-
-    // There's a prev chunk, `probe` points to its last line.
-    var end = probe+1;
-
-    // Find the beginning of this chunk.
-    var prev_color = color;
-    while (prev_color === color) {
-        probe--;
-        probe_line = c.line_elt(probe);
-        prev_color = probe_line.css("background-color");
-    }
-    c.set_sel(probe+1, end);
-    c.show_selection();
-};
-
-// Return the line number of the line nearest pixel position pos
-coverage.line_at_pos = function (pos) {
-    var l1 = coverage.line_elt(1),
-        l2 = coverage.line_elt(2),
-        result;
-    if (l1.length && l2.length) {
-        var l1_top = l1.offset().top,
-            line_height = l2.offset().top - l1_top,
-            nlines = (pos - l1_top) / line_height;
-        if (nlines < 1) {
-            result = 1;
-        }
-        else {
-            result = Math.ceil(nlines);
-        }
-    }
-    else {
-        result = 1;
-    }
-    return result;
-};
-
-// Returns 0, 1, or 2: how many of the two ends of the selection are on
-// the screen right now?
-coverage.selection_ends_on_screen = function () {
-    if (coverage.sel_begin === 0) {
-        return 0;
-    }
-
-    var top = coverage.line_elt(coverage.sel_begin);
-    var next = coverage.line_elt(coverage.sel_end-1);
-
-    return (
-        (top.isOnScreen() ? 1 : 0) +
-        (next.isOnScreen() ? 1 : 0)
-    );
-};
-
-coverage.to_next_chunk_nicely = function () {
-    coverage.finish_scrolling();
-    if (coverage.selection_ends_on_screen() === 0) {
-        // The selection is entirely off the screen: select the top line on
-        // the screen.
-        var win = $(window);
-        coverage.select_line_or_chunk(coverage.line_at_pos(win.scrollTop()));
-    }
-    coverage.to_next_chunk();
-};
-
-coverage.to_prev_chunk_nicely = function () {
-    coverage.finish_scrolling();
-    if (coverage.selection_ends_on_screen() === 0) {
-        var win = $(window);
-        coverage.select_line_or_chunk(coverage.line_at_pos(win.scrollTop() + win.height()));
-    }
-    coverage.to_prev_chunk();
-};
-
-// Select line number lineno, or if it is in a colored chunk, select the
-// entire chunk
-coverage.select_line_or_chunk = function (lineno) {
-    var c = coverage;
-    var probe_line = c.line_elt(lineno);
-    if (probe_line.length === 0) {
-        return;
-    }
-    var the_color = probe_line.css("background-color");
-    if (!c.is_transparent(the_color)) {
-        // The line is in a highlighted chunk.
-        // Search backward for the first line.
-        var probe = lineno;
-        var color = the_color;
-        while (probe > 0 && color === the_color) {
-            probe--;
-            probe_line = c.line_elt(probe);
-            if (probe_line.length === 0) {
-                break;
-            }
-            color = probe_line.css("background-color");
-        }
-        var begin = probe + 1;
-
-        // Search forward for the last line.
-        probe = lineno;
-        color = the_color;
-        while (color === the_color) {
-            probe++;
-            probe_line = c.line_elt(probe);
-            color = probe_line.css("background-color");
-        }
-
-        coverage.set_sel(begin, probe);
-    }
-    else {
-        coverage.set_sel(lineno);
-    }
-};
-
-coverage.show_selection = function () {
-    var c = coverage;
-
-    // Highlight the lines in the chunk
-    c.code_container().find(".highlight").removeClass("highlight");
-    for (var probe = c.sel_begin; probe > 0 && probe < c.sel_end; probe++) {
-        c.num_elt(probe).addClass("highlight");
-    }
-
-    c.scroll_to_selection();
-};
-
-coverage.scroll_to_selection = function () {
-    // Scroll the page if the chunk isn't fully visible.
-    if (coverage.selection_ends_on_screen() < 2) {
-        // Need to move the page. The html,body trick makes it scroll in all
-        // browsers, got it from http://stackoverflow.com/questions/3042651
-        var top = coverage.line_elt(coverage.sel_begin);
-        var top_pos = parseInt(top.offset().top, 10);
-        coverage.scroll_window(top_pos - 30);
-    }
-};
-
-coverage.scroll_window = function (to_pos) {
-    $("html,body").animate({scrollTop: to_pos}, 200);
-};
-
-coverage.finish_scrolling = function () {
-    $("html,body").stop(true, true);
-};
--- a/DebugClients/Python/coverage/htmlfiles/index.html	Sat Oct 10 12:06:10 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,104 +0,0 @@
-<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
-<html>
-<head>
-    <meta http-equiv='Content-Type' content='text/html; charset=utf-8'>
-    <title>{{ title|escape }}</title>
-    <link rel='stylesheet' href='style.css' type='text/css'>
-    {% if extra_css %}
-        <link rel='stylesheet' href='{{ extra_css }}' type='text/css'>
-    {% endif %}
-    <script type='text/javascript' src='jquery.min.js'></script>
-    <script type='text/javascript' src='jquery.tablesorter.min.js'></script>
-    <script type='text/javascript' src='jquery.hotkeys.js'></script>
-    <script type='text/javascript' src='coverage_html.js'></script>
-    <script type='text/javascript' charset='utf-8'>
-        jQuery(document).ready(coverage.index_ready);
-    </script>
-</head>
-<body id='indexfile'>
-
-<div id='header'>
-    <div class='content'>
-        <h1>{{ title|escape }}:
-            <span class='pc_cov'>{{totals.pc_covered_str}}%</span>
-        </h1>
-        <img id='keyboard_icon' src='keybd_closed.png'>
-    </div>
-</div>
-
-<div class='help_panel'>
-    <img id='panel_icon' src='keybd_open.png'>
-    <p class='legend'>Hot-keys on this page</p>
-    <div>
-    <p class='keyhelp'>
-        <span class='key'>n</span>
-        <span class='key'>s</span>
-        <span class='key'>m</span>
-        <span class='key'>x</span>
-        {% if arcs %}
-        <span class='key'>b</span>
-        <span class='key'>p</span>
-        {% endif %}
-        <span class='key'>c</span> &nbsp; change column sorting
-    </p>
-    </div>
-</div>
-
-<div id='index'>
-    <table class='index'>
-        <thead>
-            {# The title='' attr doesn't work in Safari. #}
-            <tr class='tablehead' title='Click to sort'>
-                <th class='name left headerSortDown shortkey_n'>Module</th>
-                <th class='shortkey_s'>statements</th>
-                <th class='shortkey_m'>missing</th>
-                <th class='shortkey_x'>excluded</th>
-                {% if arcs %}
-                <th class='shortkey_b'>branches</th>
-                <th class='shortkey_p'>partial</th>
-                {% endif %}
-                <th class='right shortkey_c'>coverage</th>
-            </tr>
-        </thead>
-        {# HTML syntax requires thead, tfoot, tbody #}
-        <tfoot>
-            <tr class='total'>
-                <td class='name left'>Total</td>
-                <td>{{totals.n_statements}}</td>
-                <td>{{totals.n_missing}}</td>
-                <td>{{totals.n_excluded}}</td>
-                {% if arcs %}
-                <td>{{totals.n_branches}}</td>
-                <td>{{totals.n_partial_branches}}</td>
-                {% endif %}
-                <td class='right'>{{totals.pc_covered_str}}%</td>
-            </tr>
-        </tfoot>
-        <tbody>
-            {% for file in files %}
-            <tr class='file'>
-                <td class='name left'><a href='{{file.html_filename}}'>{{file.name}}</a></td>
-                <td>{{file.nums.n_statements}}</td>
-                <td>{{file.nums.n_missing}}</td>
-                <td>{{file.nums.n_excluded}}</td>
-                {% if arcs %}
-                <td>{{file.nums.n_branches}}</td>
-                <td>{{file.nums.n_partial_branches}}</td>
-                {% endif %}
-                <td class='right'>{{file.nums.pc_covered_str}}%</td>
-            </tr>
-            {% endfor %}
-        </tbody>
-    </table>
-</div>
-
-<div id='footer'>
-    <div class='content'>
-        <p>
-            <a class='nav' href='{{__url__}}'>coverage.py v{{__version__}}</a>
-        </p>
-    </div>
-</div>
-
-</body>
-</html>
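
The deleted index.html template renders one table row per measured file from a "files" list whose "nums" entries carry statement, missing and excluded counts plus a formatted percentage. A small sketch of the kind of context such a row consumes and what it expands to, with made-up values and without the optional arcs columns (the real context is assembled in html.py, not shown here):

    # Illustrative context; names and numbers are invented for the example.
    files = [
        {'html_filename': 'cogapp___init__.html', 'name': 'cogapp/__init__',
         'nums': {'n_statements': 20, 'n_missing': 2, 'n_excluded': 0,
                  'pc_covered_str': '90'}},
    ]

    # What one <tbody> row of the index table boils down to.
    row = ("<tr class='file'>"
           "<td class='name left'><a href='%(html_filename)s'>%(name)s</a></td>"
           "<td>%(n_statements)d</td><td>%(n_missing)d</td>"
           "<td>%(n_excluded)d</td>"
           "<td class='right'>%(pc)s%%</td></tr>")
    for f in files:
        print(row % dict(f, pc=f['nums']['pc_covered_str'], **f['nums']))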
--- a/DebugClients/Python/coverage/htmlfiles/jquery.hotkeys.js	Sat Oct 10 12:06:10 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,99 +0,0 @@
-/*
- * jQuery Hotkeys Plugin
- * Copyright 2010, John Resig
- * Dual licensed under the MIT or GPL Version 2 licenses.
- *
- * Based upon the plugin by Tzury Bar Yochay:
- * http://github.com/tzuryby/hotkeys
- *
- * Original idea by:
- * Binny V A, http://www.openjs.com/scripts/events/keyboard_shortcuts/
-*/
-
-(function(jQuery){
-
-	jQuery.hotkeys = {
-		version: "0.8",
-
-		specialKeys: {
-			8: "backspace", 9: "tab", 13: "return", 16: "shift", 17: "ctrl", 18: "alt", 19: "pause",
-			20: "capslock", 27: "esc", 32: "space", 33: "pageup", 34: "pagedown", 35: "end", 36: "home",
-			37: "left", 38: "up", 39: "right", 40: "down", 45: "insert", 46: "del",
-			96: "0", 97: "1", 98: "2", 99: "3", 100: "4", 101: "5", 102: "6", 103: "7",
-			104: "8", 105: "9", 106: "*", 107: "+", 109: "-", 110: ".", 111 : "/",
-			112: "f1", 113: "f2", 114: "f3", 115: "f4", 116: "f5", 117: "f6", 118: "f7", 119: "f8",
-			120: "f9", 121: "f10", 122: "f11", 123: "f12", 144: "numlock", 145: "scroll", 191: "/", 224: "meta"
-		},
-
-		shiftNums: {
-			"`": "~", "1": "!", "2": "@", "3": "#", "4": "$", "5": "%", "6": "^", "7": "&",
-			"8": "*", "9": "(", "0": ")", "-": "_", "=": "+", ";": ": ", "'": "\"", ",": "<",
-			".": ">",  "/": "?",  "\\": "|"
-		}
-	};
-
-	function keyHandler( handleObj ) {
-		// Only care when a possible input has been specified
-		if ( typeof handleObj.data !== "string" ) {
-			return;
-		}
-
-		var origHandler = handleObj.handler,
-			keys = handleObj.data.toLowerCase().split(" ");
-
-		handleObj.handler = function( event ) {
-			// Don't fire in text-accepting inputs that we didn't directly bind to
-			if ( this !== event.target && (/textarea|select/i.test( event.target.nodeName ) ||
-				 event.target.type === "text") ) {
-				return;
-			}
-
-			// Keypress represents characters, not special keys
-			var special = event.type !== "keypress" && jQuery.hotkeys.specialKeys[ event.which ],
-				character = String.fromCharCode( event.which ).toLowerCase(),
-				key, modif = "", possible = {};
-
-			// check combinations (alt|ctrl|shift+anything)
-			if ( event.altKey && special !== "alt" ) {
-				modif += "alt+";
-			}
-
-			if ( event.ctrlKey && special !== "ctrl" ) {
-				modif += "ctrl+";
-			}
-
-			// TODO: Need to make sure this works consistently across platforms
-			if ( event.metaKey && !event.ctrlKey && special !== "meta" ) {
-				modif += "meta+";
-			}
-
-			if ( event.shiftKey && special !== "shift" ) {
-				modif += "shift+";
-			}
-
-			if ( special ) {
-				possible[ modif + special ] = true;
-
-			} else {
-				possible[ modif + character ] = true;
-				possible[ modif + jQuery.hotkeys.shiftNums[ character ] ] = true;
-
-				// "$" can be triggered as "Shift+4" or "Shift+$" or just "$"
-				if ( modif === "shift+" ) {
-					possible[ jQuery.hotkeys.shiftNums[ character ] ] = true;
-				}
-			}
-
-			for ( var i = 0, l = keys.length; i < l; i++ ) {
-				if ( possible[ keys[i] ] ) {
-					return origHandler.apply( this, arguments );
-				}
-			}
-		};
-	}
-
-	jQuery.each([ "keydown", "keyup", "keypress" ], function() {
-		jQuery.event.special[ this ] = { add: keyHandler };
-	});
-
-})( jQuery );
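
The deleted jquery.hotkeys plugin matches a keydown event against bindings such as "ctrl+a" or "shift+/" by building the set of strings the event could represent and checking each bound key against it. A rough Python rendering of that matching rule with simplified inputs; it is a sketch of the idea, not a port of the plugin:

    def possible_bindings(char, special=None, alt=False, ctrl=False,
                          meta=False, shift=False, shift_map=None):
        """Return the binding strings a key event could match."""
        shift_map = shift_map or {'4': '$', '/': '?'}
        modif = ''
        if alt and special != 'alt':
            modif += 'alt+'
        if ctrl and special != 'ctrl':
            modif += 'ctrl+'
        if meta and not ctrl and special != 'meta':
            modif += 'meta+'
        if shift and special != 'shift':
            modif += 'shift+'
        if special:
            return {modif + special}
        possible = {modif + char}
        if char in shift_map:
            possible.add(modif + shift_map[char])
            if modif == 'shift+':
                # "$" can be reached as "shift+4", "shift+$" or plain "$".
                possible.add(shift_map[char])
        return possible

    assert 'shift+$' in possible_bindings('4', shift=True)
    assert '$' in possible_bindings('4', shift=True)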
--- a/DebugClients/Python/coverage/htmlfiles/jquery.isonscreen.js	Sat Oct 10 12:06:10 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,53 +0,0 @@
-/* Copyright (c) 2010
- * @author Laurence Wheway
- * Dual licensed under the MIT (http://www.opensource.org/licenses/mit-license.php)
- * and GPL (http://www.opensource.org/licenses/gpl-license.php) licenses.
- *
- * @version 1.2.0
- */
-(function($) {
-	jQuery.extend({
-		isOnScreen: function(box, container) {
-			//ensure numbers come in as integers (not strings) and remove 'px' if it's there
-			for(var i in box){box[i] = parseFloat(box[i])};
-			for(var i in container){container[i] = parseFloat(container[i])};
-
-			if(!container){
-				container = {
-					left: $(window).scrollLeft(),
-					top: $(window).scrollTop(),
-					width: $(window).width(),
-					height: $(window).height()
-				}
-			}
-
-			if(	box.left+box.width-container.left > 0 &&
-				box.left < container.width+container.left &&
-				box.top+box.height-container.top > 0 &&
-				box.top < container.height+container.top
-			) return true;
-			return false;
-		}
-	})
-
-
-	jQuery.fn.isOnScreen = function (container) {
-		for(var i in container){container[i] = parseFloat(container[i])};
-
-		if(!container){
-			container = {
-				left: $(window).scrollLeft(),
-				top: $(window).scrollTop(),
-				width: $(window).width(),
-				height: $(window).height()
-			}
-		}
-
-		if(	$(this).offset().left+$(this).width()-container.left > 0 &&
-			$(this).offset().left < container.width+container.left &&
-			$(this).offset().top+$(this).height()-container.top > 0 &&
-			$(this).offset().top < container.height+container.top
-		) return true;
-		return false;
-	}
-})(jQuery);
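
The deleted jquery.isonscreen plugin decides whether an element is visible by testing its box against the window's scroll offsets and dimensions, which is a plain rectangle-overlap check. A self-contained sketch of that test in Python, with made-up field values:

    def is_on_screen(box, viewport):
        """True if `box` overlaps `viewport`; both are dicts with
        left/top/width/height, as in the deleted plugin."""
        return (box['left'] + box['width'] > viewport['left'] and
                box['left'] < viewport['left'] + viewport['width'] and
                box['top'] + box['height'] > viewport['top'] and
                box['top'] < viewport['top'] + viewport['height'])

    viewport = {'left': 0, 'top': 400, 'width': 1024, 'height': 768}
    assert is_on_screen({'left': 10, 'top': 500, 'width': 100, 'height': 20}, viewport)
    assert not is_on_screen({'left': 10, 'top': 0, 'width': 100, 'height': 20}, viewport)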
--- a/DebugClients/Python/coverage/htmlfiles/jquery.min.js	Sat Oct 10 12:06:10 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,166 +0,0 @@
-/*!
- * jQuery JavaScript Library v1.4.3
- * http://jquery.com/
- *
- * Copyright 2010, John Resig
- * Dual licensed under the MIT or GPL Version 2 licenses.
- * http://jquery.org/license
- *
- * Includes Sizzle.js
- * http://sizzlejs.com/
- * Copyright 2010, The Dojo Foundation
- * Released under the MIT, BSD, and GPL Licenses.
- *
- * Date: Thu Oct 14 23:10:06 2010 -0400
- */
-(function(E,A){function U(){return false}function ba(){return true}function ja(a,b,d){d[0].type=a;return c.event.handle.apply(b,d)}function Ga(a){var b,d,e=[],f=[],h,k,l,n,s,v,B,D;k=c.data(this,this.nodeType?"events":"__events__");if(typeof k==="function")k=k.events;if(!(a.liveFired===this||!k||!k.live||a.button&&a.type==="click")){if(a.namespace)D=RegExp("(^|\\.)"+a.namespace.split(".").join("\\.(?:.*\\.)?")+"(\\.|$)");a.liveFired=this;var H=k.live.slice(0);for(n=0;n<H.length;n++){k=H[n];k.origType.replace(X,
-"")===a.type?f.push(k.selector):H.splice(n--,1)}f=c(a.target).closest(f,a.currentTarget);s=0;for(v=f.length;s<v;s++){B=f[s];for(n=0;n<H.length;n++){k=H[n];if(B.selector===k.selector&&(!D||D.test(k.namespace))){l=B.elem;h=null;if(k.preType==="mouseenter"||k.preType==="mouseleave"){a.type=k.preType;h=c(a.relatedTarget).closest(k.selector)[0]}if(!h||h!==l)e.push({elem:l,handleObj:k,level:B.level})}}}s=0;for(v=e.length;s<v;s++){f=e[s];if(d&&f.level>d)break;a.currentTarget=f.elem;a.data=f.handleObj.data;
-a.handleObj=f.handleObj;D=f.handleObj.origHandler.apply(f.elem,arguments);if(D===false||a.isPropagationStopped()){d=f.level;if(D===false)b=false}}return b}}function Y(a,b){return(a&&a!=="*"?a+".":"")+b.replace(Ha,"`").replace(Ia,"&")}function ka(a,b,d){if(c.isFunction(b))return c.grep(a,function(f,h){return!!b.call(f,h,f)===d});else if(b.nodeType)return c.grep(a,function(f){return f===b===d});else if(typeof b==="string"){var e=c.grep(a,function(f){return f.nodeType===1});if(Ja.test(b))return c.filter(b,
-e,!d);else b=c.filter(b,e)}return c.grep(a,function(f){return c.inArray(f,b)>=0===d})}function la(a,b){var d=0;b.each(function(){if(this.nodeName===(a[d]&&a[d].nodeName)){var e=c.data(a[d++]),f=c.data(this,e);if(e=e&&e.events){delete f.handle;f.events={};for(var h in e)for(var k in e[h])c.event.add(this,h,e[h][k],e[h][k].data)}}})}function Ka(a,b){b.src?c.ajax({url:b.src,async:false,dataType:"script"}):c.globalEval(b.text||b.textContent||b.innerHTML||"");b.parentNode&&b.parentNode.removeChild(b)}
-function ma(a,b,d){var e=b==="width"?a.offsetWidth:a.offsetHeight;if(d==="border")return e;c.each(b==="width"?La:Ma,function(){d||(e-=parseFloat(c.css(a,"padding"+this))||0);if(d==="margin")e+=parseFloat(c.css(a,"margin"+this))||0;else e-=parseFloat(c.css(a,"border"+this+"Width"))||0});return e}function ca(a,b,d,e){if(c.isArray(b)&&b.length)c.each(b,function(f,h){d||Na.test(a)?e(a,h):ca(a+"["+(typeof h==="object"||c.isArray(h)?f:"")+"]",h,d,e)});else if(!d&&b!=null&&typeof b==="object")c.isEmptyObject(b)?
-e(a,""):c.each(b,function(f,h){ca(a+"["+f+"]",h,d,e)});else e(a,b)}function S(a,b){var d={};c.each(na.concat.apply([],na.slice(0,b)),function(){d[this]=a});return d}function oa(a){if(!da[a]){var b=c("<"+a+">").appendTo("body"),d=b.css("display");b.remove();if(d==="none"||d==="")d="block";da[a]=d}return da[a]}function ea(a){return c.isWindow(a)?a:a.nodeType===9?a.defaultView||a.parentWindow:false}var u=E.document,c=function(){function a(){if(!b.isReady){try{u.documentElement.doScroll("left")}catch(i){setTimeout(a,
-1);return}b.ready()}}var b=function(i,r){return new b.fn.init(i,r)},d=E.jQuery,e=E.$,f,h=/^(?:[^<]*(<[\w\W]+>)[^>]*$|#([\w\-]+)$)/,k=/\S/,l=/^\s+/,n=/\s+$/,s=/\W/,v=/\d/,B=/^<(\w+)\s*\/?>(?:<\/\1>)?$/,D=/^[\],:{}\s]*$/,H=/\\(?:["\\\/bfnrt]|u[0-9a-fA-F]{4})/g,w=/"[^"\\\n\r]*"|true|false|null|-?\d+(?:\.\d*)?(?:[eE][+\-]?\d+)?/g,G=/(?:^|:|,)(?:\s*\[)+/g,M=/(webkit)[ \/]([\w.]+)/,g=/(opera)(?:.*version)?[ \/]([\w.]+)/,j=/(msie) ([\w.]+)/,o=/(mozilla)(?:.*? rv:([\w.]+))?/,m=navigator.userAgent,p=false,
-q=[],t,x=Object.prototype.toString,C=Object.prototype.hasOwnProperty,P=Array.prototype.push,N=Array.prototype.slice,R=String.prototype.trim,Q=Array.prototype.indexOf,L={};b.fn=b.prototype={init:function(i,r){var y,z,F;if(!i)return this;if(i.nodeType){this.context=this[0]=i;this.length=1;return this}if(i==="body"&&!r&&u.body){this.context=u;this[0]=u.body;this.selector="body";this.length=1;return this}if(typeof i==="string")if((y=h.exec(i))&&(y[1]||!r))if(y[1]){F=r?r.ownerDocument||r:u;if(z=B.exec(i))if(b.isPlainObject(r)){i=
-[u.createElement(z[1])];b.fn.attr.call(i,r,true)}else i=[F.createElement(z[1])];else{z=b.buildFragment([y[1]],[F]);i=(z.cacheable?z.fragment.cloneNode(true):z.fragment).childNodes}return b.merge(this,i)}else{if((z=u.getElementById(y[2]))&&z.parentNode){if(z.id!==y[2])return f.find(i);this.length=1;this[0]=z}this.context=u;this.selector=i;return this}else if(!r&&!s.test(i)){this.selector=i;this.context=u;i=u.getElementsByTagName(i);return b.merge(this,i)}else return!r||r.jquery?(r||f).find(i):b(r).find(i);
-else if(b.isFunction(i))return f.ready(i);if(i.selector!==A){this.selector=i.selector;this.context=i.context}return b.makeArray(i,this)},selector:"",jquery:"1.4.3",length:0,size:function(){return this.length},toArray:function(){return N.call(this,0)},get:function(i){return i==null?this.toArray():i<0?this.slice(i)[0]:this[i]},pushStack:function(i,r,y){var z=b();b.isArray(i)?P.apply(z,i):b.merge(z,i);z.prevObject=this;z.context=this.context;if(r==="find")z.selector=this.selector+(this.selector?" ":
-"")+y;else if(r)z.selector=this.selector+"."+r+"("+y+")";return z},each:function(i,r){return b.each(this,i,r)},ready:function(i){b.bindReady();if(b.isReady)i.call(u,b);else q&&q.push(i);return this},eq:function(i){return i===-1?this.slice(i):this.slice(i,+i+1)},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},slice:function(){return this.pushStack(N.apply(this,arguments),"slice",N.call(arguments).join(","))},map:function(i){return this.pushStack(b.map(this,function(r,y){return i.call(r,
-y,r)}))},end:function(){return this.prevObject||b(null)},push:P,sort:[].sort,splice:[].splice};b.fn.init.prototype=b.fn;b.extend=b.fn.extend=function(){var i=arguments[0]||{},r=1,y=arguments.length,z=false,F,I,K,J,fa;if(typeof i==="boolean"){z=i;i=arguments[1]||{};r=2}if(typeof i!=="object"&&!b.isFunction(i))i={};if(y===r){i=this;--r}for(;r<y;r++)if((F=arguments[r])!=null)for(I in F){K=i[I];J=F[I];if(i!==J)if(z&&J&&(b.isPlainObject(J)||(fa=b.isArray(J)))){if(fa){fa=false;clone=K&&b.isArray(K)?K:[]}else clone=
-K&&b.isPlainObject(K)?K:{};i[I]=b.extend(z,clone,J)}else if(J!==A)i[I]=J}return i};b.extend({noConflict:function(i){E.$=e;if(i)E.jQuery=d;return b},isReady:false,readyWait:1,ready:function(i){i===true&&b.readyWait--;if(!b.readyWait||i!==true&&!b.isReady){if(!u.body)return setTimeout(b.ready,1);b.isReady=true;if(!(i!==true&&--b.readyWait>0)){if(q){for(var r=0;i=q[r++];)i.call(u,b);q=null}b.fn.triggerHandler&&b(u).triggerHandler("ready")}}},bindReady:function(){if(!p){p=true;if(u.readyState==="complete")return setTimeout(b.ready,
-1);if(u.addEventListener){u.addEventListener("DOMContentLoaded",t,false);E.addEventListener("load",b.ready,false)}else if(u.attachEvent){u.attachEvent("onreadystatechange",t);E.attachEvent("onload",b.ready);var i=false;try{i=E.frameElement==null}catch(r){}u.documentElement.doScroll&&i&&a()}}},isFunction:function(i){return b.type(i)==="function"},isArray:Array.isArray||function(i){return b.type(i)==="array"},isWindow:function(i){return i&&typeof i==="object"&&"setInterval"in i},isNaN:function(i){return i==
-null||!v.test(i)||isNaN(i)},type:function(i){return i==null?String(i):L[x.call(i)]||"object"},isPlainObject:function(i){if(!i||b.type(i)!=="object"||i.nodeType||b.isWindow(i))return false;if(i.constructor&&!C.call(i,"constructor")&&!C.call(i.constructor.prototype,"isPrototypeOf"))return false;for(var r in i);return r===A||C.call(i,r)},isEmptyObject:function(i){for(var r in i)return false;return true},error:function(i){throw i;},parseJSON:function(i){if(typeof i!=="string"||!i)return null;i=b.trim(i);
-if(D.test(i.replace(H,"@").replace(w,"]").replace(G,"")))return E.JSON&&E.JSON.parse?E.JSON.parse(i):(new Function("return "+i))();else b.error("Invalid JSON: "+i)},noop:function(){},globalEval:function(i){if(i&&k.test(i)){var r=u.getElementsByTagName("head")[0]||u.documentElement,y=u.createElement("script");y.type="text/javascript";if(b.support.scriptEval)y.appendChild(u.createTextNode(i));else y.text=i;r.insertBefore(y,r.firstChild);r.removeChild(y)}},nodeName:function(i,r){return i.nodeName&&i.nodeName.toUpperCase()===
-r.toUpperCase()},each:function(i,r,y){var z,F=0,I=i.length,K=I===A||b.isFunction(i);if(y)if(K)for(z in i){if(r.apply(i[z],y)===false)break}else for(;F<I;){if(r.apply(i[F++],y)===false)break}else if(K)for(z in i){if(r.call(i[z],z,i[z])===false)break}else for(y=i[0];F<I&&r.call(y,F,y)!==false;y=i[++F]);return i},trim:R?function(i){return i==null?"":R.call(i)}:function(i){return i==null?"":i.toString().replace(l,"").replace(n,"")},makeArray:function(i,r){var y=r||[];if(i!=null){var z=b.type(i);i.length==
-null||z==="string"||z==="function"||z==="regexp"||b.isWindow(i)?P.call(y,i):b.merge(y,i)}return y},inArray:function(i,r){if(r.indexOf)return r.indexOf(i);for(var y=0,z=r.length;y<z;y++)if(r[y]===i)return y;return-1},merge:function(i,r){var y=i.length,z=0;if(typeof r.length==="number")for(var F=r.length;z<F;z++)i[y++]=r[z];else for(;r[z]!==A;)i[y++]=r[z++];i.length=y;return i},grep:function(i,r,y){var z=[],F;y=!!y;for(var I=0,K=i.length;I<K;I++){F=!!r(i[I],I);y!==F&&z.push(i[I])}return z},map:function(i,
-r,y){for(var z=[],F,I=0,K=i.length;I<K;I++){F=r(i[I],I,y);if(F!=null)z[z.length]=F}return z.concat.apply([],z)},guid:1,proxy:function(i,r,y){if(arguments.length===2)if(typeof r==="string"){y=i;i=y[r];r=A}else if(r&&!b.isFunction(r)){y=r;r=A}if(!r&&i)r=function(){return i.apply(y||this,arguments)};if(i)r.guid=i.guid=i.guid||r.guid||b.guid++;return r},access:function(i,r,y,z,F,I){var K=i.length;if(typeof r==="object"){for(var J in r)b.access(i,J,r[J],z,F,y);return i}if(y!==A){z=!I&&z&&b.isFunction(y);
-for(J=0;J<K;J++)F(i[J],r,z?y.call(i[J],J,F(i[J],r)):y,I);return i}return K?F(i[0],r):A},now:function(){return(new Date).getTime()},uaMatch:function(i){i=i.toLowerCase();i=M.exec(i)||g.exec(i)||j.exec(i)||i.indexOf("compatible")<0&&o.exec(i)||[];return{browser:i[1]||"",version:i[2]||"0"}},browser:{}});b.each("Boolean Number String Function Array Date RegExp Object".split(" "),function(i,r){L["[object "+r+"]"]=r.toLowerCase()});m=b.uaMatch(m);if(m.browser){b.browser[m.browser]=true;b.browser.version=
-m.version}if(b.browser.webkit)b.browser.safari=true;if(Q)b.inArray=function(i,r){return Q.call(r,i)};if(!/\s/.test("\u00a0")){l=/^[\s\xA0]+/;n=/[\s\xA0]+$/}f=b(u);if(u.addEventListener)t=function(){u.removeEventListener("DOMContentLoaded",t,false);b.ready()};else if(u.attachEvent)t=function(){if(u.readyState==="complete"){u.detachEvent("onreadystatechange",t);b.ready()}};return E.jQuery=E.$=b}();(function(){c.support={};var a=u.documentElement,b=u.createElement("script"),d=u.createElement("div"),
-e="script"+c.now();d.style.display="none";d.innerHTML="   <link/><table></table><a href='/a' style='color:red;float:left;opacity:.55;'>a</a><input type='checkbox'/>";var f=d.getElementsByTagName("*"),h=d.getElementsByTagName("a")[0],k=u.createElement("select"),l=k.appendChild(u.createElement("option"));if(!(!f||!f.length||!h)){c.support={leadingWhitespace:d.firstChild.nodeType===3,tbody:!d.getElementsByTagName("tbody").length,htmlSerialize:!!d.getElementsByTagName("link").length,style:/red/.test(h.getAttribute("style")),
-hrefNormalized:h.getAttribute("href")==="/a",opacity:/^0.55$/.test(h.style.opacity),cssFloat:!!h.style.cssFloat,checkOn:d.getElementsByTagName("input")[0].value==="on",optSelected:l.selected,optDisabled:false,checkClone:false,scriptEval:false,noCloneEvent:true,boxModel:null,inlineBlockNeedsLayout:false,shrinkWrapBlocks:false,reliableHiddenOffsets:true};k.disabled=true;c.support.optDisabled=!l.disabled;b.type="text/javascript";try{b.appendChild(u.createTextNode("window."+e+"=1;"))}catch(n){}a.insertBefore(b,
-a.firstChild);if(E[e]){c.support.scriptEval=true;delete E[e]}a.removeChild(b);if(d.attachEvent&&d.fireEvent){d.attachEvent("onclick",function s(){c.support.noCloneEvent=false;d.detachEvent("onclick",s)});d.cloneNode(true).fireEvent("onclick")}d=u.createElement("div");d.innerHTML="<input type='radio' name='radiotest' checked='checked'/>";a=u.createDocumentFragment();a.appendChild(d.firstChild);c.support.checkClone=a.cloneNode(true).cloneNode(true).lastChild.checked;c(function(){var s=u.createElement("div");
-s.style.width=s.style.paddingLeft="1px";u.body.appendChild(s);c.boxModel=c.support.boxModel=s.offsetWidth===2;if("zoom"in s.style){s.style.display="inline";s.style.zoom=1;c.support.inlineBlockNeedsLayout=s.offsetWidth===2;s.style.display="";s.innerHTML="<div style='width:4px;'></div>";c.support.shrinkWrapBlocks=s.offsetWidth!==2}s.innerHTML="<table><tr><td style='padding:0;display:none'></td><td>t</td></tr></table>";var v=s.getElementsByTagName("td");c.support.reliableHiddenOffsets=v[0].offsetHeight===
-0;v[0].style.display="";v[1].style.display="none";c.support.reliableHiddenOffsets=c.support.reliableHiddenOffsets&&v[0].offsetHeight===0;s.innerHTML="";u.body.removeChild(s).style.display="none"});a=function(s){var v=u.createElement("div");s="on"+s;var B=s in v;if(!B){v.setAttribute(s,"return;");B=typeof v[s]==="function"}return B};c.support.submitBubbles=a("submit");c.support.changeBubbles=a("change");a=b=d=f=h=null}})();c.props={"for":"htmlFor","class":"className",readonly:"readOnly",maxlength:"maxLength",
-cellspacing:"cellSpacing",rowspan:"rowSpan",colspan:"colSpan",tabindex:"tabIndex",usemap:"useMap",frameborder:"frameBorder"};var pa={},Oa=/^(?:\{.*\}|\[.*\])$/;c.extend({cache:{},uuid:0,expando:"jQuery"+c.now(),noData:{embed:true,object:"clsid:D27CDB6E-AE6D-11cf-96B8-444553540000",applet:true},data:function(a,b,d){if(c.acceptData(a)){a=a==E?pa:a;var e=a.nodeType,f=e?a[c.expando]:null,h=c.cache;if(!(e&&!f&&typeof b==="string"&&d===A)){if(e)f||(a[c.expando]=f=++c.uuid);else h=a;if(typeof b==="object")if(e)h[f]=
-c.extend(h[f],b);else c.extend(h,b);else if(e&&!h[f])h[f]={};a=e?h[f]:h;if(d!==A)a[b]=d;return typeof b==="string"?a[b]:a}}},removeData:function(a,b){if(c.acceptData(a)){a=a==E?pa:a;var d=a.nodeType,e=d?a[c.expando]:a,f=c.cache,h=d?f[e]:e;if(b){if(h){delete h[b];d&&c.isEmptyObject(h)&&c.removeData(a)}}else if(d&&c.support.deleteExpando)delete a[c.expando];else if(a.removeAttribute)a.removeAttribute(c.expando);else if(d)delete f[e];else for(var k in a)delete a[k]}},acceptData:function(a){if(a.nodeName){var b=
-c.noData[a.nodeName.toLowerCase()];if(b)return!(b===true||a.getAttribute("classid")!==b)}return true}});c.fn.extend({data:function(a,b){if(typeof a==="undefined")return this.length?c.data(this[0]):null;else if(typeof a==="object")return this.each(function(){c.data(this,a)});var d=a.split(".");d[1]=d[1]?"."+d[1]:"";if(b===A){var e=this.triggerHandler("getData"+d[1]+"!",[d[0]]);if(e===A&&this.length){e=c.data(this[0],a);if(e===A&&this[0].nodeType===1){e=this[0].getAttribute("data-"+a);if(typeof e===
-"string")try{e=e==="true"?true:e==="false"?false:e==="null"?null:!c.isNaN(e)?parseFloat(e):Oa.test(e)?c.parseJSON(e):e}catch(f){}else e=A}}return e===A&&d[1]?this.data(d[0]):e}else return this.each(function(){var h=c(this),k=[d[0],b];h.triggerHandler("setData"+d[1]+"!",k);c.data(this,a,b);h.triggerHandler("changeData"+d[1]+"!",k)})},removeData:function(a){return this.each(function(){c.removeData(this,a)})}});c.extend({queue:function(a,b,d){if(a){b=(b||"fx")+"queue";var e=c.data(a,b);if(!d)return e||
-[];if(!e||c.isArray(d))e=c.data(a,b,c.makeArray(d));else e.push(d);return e}},dequeue:function(a,b){b=b||"fx";var d=c.queue(a,b),e=d.shift();if(e==="inprogress")e=d.shift();if(e){b==="fx"&&d.unshift("inprogress");e.call(a,function(){c.dequeue(a,b)})}}});c.fn.extend({queue:function(a,b){if(typeof a!=="string"){b=a;a="fx"}if(b===A)return c.queue(this[0],a);return this.each(function(){var d=c.queue(this,a,b);a==="fx"&&d[0]!=="inprogress"&&c.dequeue(this,a)})},dequeue:function(a){return this.each(function(){c.dequeue(this,
-a)})},delay:function(a,b){a=c.fx?c.fx.speeds[a]||a:a;b=b||"fx";return this.queue(b,function(){var d=this;setTimeout(function(){c.dequeue(d,b)},a)})},clearQueue:function(a){return this.queue(a||"fx",[])}});var qa=/[\n\t]/g,ga=/\s+/,Pa=/\r/g,Qa=/^(?:href|src|style)$/,Ra=/^(?:button|input)$/i,Sa=/^(?:button|input|object|select|textarea)$/i,Ta=/^a(?:rea)?$/i,ra=/^(?:radio|checkbox)$/i;c.fn.extend({attr:function(a,b){return c.access(this,a,b,true,c.attr)},removeAttr:function(a){return this.each(function(){c.attr(this,
-a,"");this.nodeType===1&&this.removeAttribute(a)})},addClass:function(a){if(c.isFunction(a))return this.each(function(s){var v=c(this);v.addClass(a.call(this,s,v.attr("class")))});if(a&&typeof a==="string")for(var b=(a||"").split(ga),d=0,e=this.length;d<e;d++){var f=this[d];if(f.nodeType===1)if(f.className){for(var h=" "+f.className+" ",k=f.className,l=0,n=b.length;l<n;l++)if(h.indexOf(" "+b[l]+" ")<0)k+=" "+b[l];f.className=c.trim(k)}else f.className=a}return this},removeClass:function(a){if(c.isFunction(a))return this.each(function(n){var s=
-c(this);s.removeClass(a.call(this,n,s.attr("class")))});if(a&&typeof a==="string"||a===A)for(var b=(a||"").split(ga),d=0,e=this.length;d<e;d++){var f=this[d];if(f.nodeType===1&&f.className)if(a){for(var h=(" "+f.className+" ").replace(qa," "),k=0,l=b.length;k<l;k++)h=h.replace(" "+b[k]+" "," ");f.className=c.trim(h)}else f.className=""}return this},toggleClass:function(a,b){var d=typeof a,e=typeof b==="boolean";if(c.isFunction(a))return this.each(function(f){var h=c(this);h.toggleClass(a.call(this,
-f,h.attr("class"),b),b)});return this.each(function(){if(d==="string")for(var f,h=0,k=c(this),l=b,n=a.split(ga);f=n[h++];){l=e?l:!k.hasClass(f);k[l?"addClass":"removeClass"](f)}else if(d==="undefined"||d==="boolean"){this.className&&c.data(this,"__className__",this.className);this.className=this.className||a===false?"":c.data(this,"__className__")||""}})},hasClass:function(a){a=" "+a+" ";for(var b=0,d=this.length;b<d;b++)if((" "+this[b].className+" ").replace(qa," ").indexOf(a)>-1)return true;return false},
-val:function(a){if(!arguments.length){var b=this[0];if(b){if(c.nodeName(b,"option")){var d=b.attributes.value;return!d||d.specified?b.value:b.text}if(c.nodeName(b,"select")){var e=b.selectedIndex;d=[];var f=b.options;b=b.type==="select-one";if(e<0)return null;var h=b?e:0;for(e=b?e+1:f.length;h<e;h++){var k=f[h];if(k.selected&&(c.support.optDisabled?!k.disabled:k.getAttribute("disabled")===null)&&(!k.parentNode.disabled||!c.nodeName(k.parentNode,"optgroup"))){a=c(k).val();if(b)return a;d.push(a)}}return d}if(ra.test(b.type)&&
-!c.support.checkOn)return b.getAttribute("value")===null?"on":b.value;return(b.value||"").replace(Pa,"")}return A}var l=c.isFunction(a);return this.each(function(n){var s=c(this),v=a;if(this.nodeType===1){if(l)v=a.call(this,n,s.val());if(v==null)v="";else if(typeof v==="number")v+="";else if(c.isArray(v))v=c.map(v,function(D){return D==null?"":D+""});if(c.isArray(v)&&ra.test(this.type))this.checked=c.inArray(s.val(),v)>=0;else if(c.nodeName(this,"select")){var B=c.makeArray(v);c("option",this).each(function(){this.selected=
-c.inArray(c(this).val(),B)>=0});if(!B.length)this.selectedIndex=-1}else this.value=v}})}});c.extend({attrFn:{val:true,css:true,html:true,text:true,data:true,width:true,height:true,offset:true},attr:function(a,b,d,e){if(!a||a.nodeType===3||a.nodeType===8)return A;if(e&&b in c.attrFn)return c(a)[b](d);e=a.nodeType!==1||!c.isXMLDoc(a);var f=d!==A;b=e&&c.props[b]||b;if(a.nodeType===1){var h=Qa.test(b);if((b in a||a[b]!==A)&&e&&!h){if(f){b==="type"&&Ra.test(a.nodeName)&&a.parentNode&&c.error("type property can't be changed");
-if(d===null)a.nodeType===1&&a.removeAttribute(b);else a[b]=d}if(c.nodeName(a,"form")&&a.getAttributeNode(b))return a.getAttributeNode(b).nodeValue;if(b==="tabIndex")return(b=a.getAttributeNode("tabIndex"))&&b.specified?b.value:Sa.test(a.nodeName)||Ta.test(a.nodeName)&&a.href?0:A;return a[b]}if(!c.support.style&&e&&b==="style"){if(f)a.style.cssText=""+d;return a.style.cssText}f&&a.setAttribute(b,""+d);if(!a.attributes[b]&&a.hasAttribute&&!a.hasAttribute(b))return A;a=!c.support.hrefNormalized&&e&&
-h?a.getAttribute(b,2):a.getAttribute(b);return a===null?A:a}}});var X=/\.(.*)$/,ha=/^(?:textarea|input|select)$/i,Ha=/\./g,Ia=/ /g,Ua=/[^\w\s.|`]/g,Va=function(a){return a.replace(Ua,"\\$&")},sa={focusin:0,focusout:0};c.event={add:function(a,b,d,e){if(!(a.nodeType===3||a.nodeType===8)){if(c.isWindow(a)&&a!==E&&!a.frameElement)a=E;if(d===false)d=U;var f,h;if(d.handler){f=d;d=f.handler}if(!d.guid)d.guid=c.guid++;if(h=c.data(a)){var k=a.nodeType?"events":"__events__",l=h[k],n=h.handle;if(typeof l===
-"function"){n=l.handle;l=l.events}else if(!l){a.nodeType||(h[k]=h=function(){});h.events=l={}}if(!n)h.handle=n=function(){return typeof c!=="undefined"&&!c.event.triggered?c.event.handle.apply(n.elem,arguments):A};n.elem=a;b=b.split(" ");for(var s=0,v;k=b[s++];){h=f?c.extend({},f):{handler:d,data:e};if(k.indexOf(".")>-1){v=k.split(".");k=v.shift();h.namespace=v.slice(0).sort().join(".")}else{v=[];h.namespace=""}h.type=k;if(!h.guid)h.guid=d.guid;var B=l[k],D=c.event.special[k]||{};if(!B){B=l[k]=[];
-if(!D.setup||D.setup.call(a,e,v,n)===false)if(a.addEventListener)a.addEventListener(k,n,false);else a.attachEvent&&a.attachEvent("on"+k,n)}if(D.add){D.add.call(a,h);if(!h.handler.guid)h.handler.guid=d.guid}B.push(h);c.event.global[k]=true}a=null}}},global:{},remove:function(a,b,d,e){if(!(a.nodeType===3||a.nodeType===8)){if(d===false)d=U;var f,h,k=0,l,n,s,v,B,D,H=a.nodeType?"events":"__events__",w=c.data(a),G=w&&w[H];if(w&&G){if(typeof G==="function"){w=G;G=G.events}if(b&&b.type){d=b.handler;b=b.type}if(!b||
-typeof b==="string"&&b.charAt(0)==="."){b=b||"";for(f in G)c.event.remove(a,f+b)}else{for(b=b.split(" ");f=b[k++];){v=f;l=f.indexOf(".")<0;n=[];if(!l){n=f.split(".");f=n.shift();s=RegExp("(^|\\.)"+c.map(n.slice(0).sort(),Va).join("\\.(?:.*\\.)?")+"(\\.|$)")}if(B=G[f])if(d){v=c.event.special[f]||{};for(h=e||0;h<B.length;h++){D=B[h];if(d.guid===D.guid){if(l||s.test(D.namespace)){e==null&&B.splice(h--,1);v.remove&&v.remove.call(a,D)}if(e!=null)break}}if(B.length===0||e!=null&&B.length===1){if(!v.teardown||
-v.teardown.call(a,n)===false)c.removeEvent(a,f,w.handle);delete G[f]}}else for(h=0;h<B.length;h++){D=B[h];if(l||s.test(D.namespace)){c.event.remove(a,v,D.handler,h);B.splice(h--,1)}}}if(c.isEmptyObject(G)){if(b=w.handle)b.elem=null;delete w.events;delete w.handle;if(typeof w==="function")c.removeData(a,H);else c.isEmptyObject(w)&&c.removeData(a)}}}}},trigger:function(a,b,d,e){var f=a.type||a;if(!e){a=typeof a==="object"?a[c.expando]?a:c.extend(c.Event(f),a):c.Event(f);if(f.indexOf("!")>=0){a.type=
-f=f.slice(0,-1);a.exclusive=true}if(!d){a.stopPropagation();c.event.global[f]&&c.each(c.cache,function(){this.events&&this.events[f]&&c.event.trigger(a,b,this.handle.elem)})}if(!d||d.nodeType===3||d.nodeType===8)return A;a.result=A;a.target=d;b=c.makeArray(b);b.unshift(a)}a.currentTarget=d;(e=d.nodeType?c.data(d,"handle"):(c.data(d,"__events__")||{}).handle)&&e.apply(d,b);e=d.parentNode||d.ownerDocument;try{if(!(d&&d.nodeName&&c.noData[d.nodeName.toLowerCase()]))if(d["on"+f]&&d["on"+f].apply(d,b)===
-false){a.result=false;a.preventDefault()}}catch(h){}if(!a.isPropagationStopped()&&e)c.event.trigger(a,b,e,true);else if(!a.isDefaultPrevented()){e=a.target;var k,l=f.replace(X,""),n=c.nodeName(e,"a")&&l==="click",s=c.event.special[l]||{};if((!s._default||s._default.call(d,a)===false)&&!n&&!(e&&e.nodeName&&c.noData[e.nodeName.toLowerCase()])){try{if(e[l]){if(k=e["on"+l])e["on"+l]=null;c.event.triggered=true;e[l]()}}catch(v){}if(k)e["on"+l]=k;c.event.triggered=false}}},handle:function(a){var b,d,e;
-d=[];var f,h=c.makeArray(arguments);a=h[0]=c.event.fix(a||E.event);a.currentTarget=this;b=a.type.indexOf(".")<0&&!a.exclusive;if(!b){e=a.type.split(".");a.type=e.shift();d=e.slice(0).sort();e=RegExp("(^|\\.)"+d.join("\\.(?:.*\\.)?")+"(\\.|$)")}a.namespace=a.namespace||d.join(".");f=c.data(this,this.nodeType?"events":"__events__");if(typeof f==="function")f=f.events;d=(f||{})[a.type];if(f&&d){d=d.slice(0);f=0;for(var k=d.length;f<k;f++){var l=d[f];if(b||e.test(l.namespace)){a.handler=l.handler;a.data=
-l.data;a.handleObj=l;l=l.handler.apply(this,h);if(l!==A){a.result=l;if(l===false){a.preventDefault();a.stopPropagation()}}if(a.isImmediatePropagationStopped())break}}}return a.result},props:"altKey attrChange attrName bubbles button cancelable charCode clientX clientY ctrlKey currentTarget data detail eventPhase fromElement handler keyCode layerX layerY metaKey newValue offsetX offsetY pageX pageY prevValue relatedNode relatedTarget screenX screenY shiftKey srcElement target toElement view wheelDelta which".split(" "),
-fix:function(a){if(a[c.expando])return a;var b=a;a=c.Event(b);for(var d=this.props.length,e;d;){e=this.props[--d];a[e]=b[e]}if(!a.target)a.target=a.srcElement||u;if(a.target.nodeType===3)a.target=a.target.parentNode;if(!a.relatedTarget&&a.fromElement)a.relatedTarget=a.fromElement===a.target?a.toElement:a.fromElement;if(a.pageX==null&&a.clientX!=null){b=u.documentElement;d=u.body;a.pageX=a.clientX+(b&&b.scrollLeft||d&&d.scrollLeft||0)-(b&&b.clientLeft||d&&d.clientLeft||0);a.pageY=a.clientY+(b&&b.scrollTop||
-d&&d.scrollTop||0)-(b&&b.clientTop||d&&d.clientTop||0)}if(a.which==null&&(a.charCode!=null||a.keyCode!=null))a.which=a.charCode!=null?a.charCode:a.keyCode;if(!a.metaKey&&a.ctrlKey)a.metaKey=a.ctrlKey;if(!a.which&&a.button!==A)a.which=a.button&1?1:a.button&2?3:a.button&4?2:0;return a},guid:1E8,proxy:c.proxy,special:{ready:{setup:c.bindReady,teardown:c.noop},live:{add:function(a){c.event.add(this,Y(a.origType,a.selector),c.extend({},a,{handler:Ga,guid:a.handler.guid}))},remove:function(a){c.event.remove(this,
-Y(a.origType,a.selector),a)}},beforeunload:{setup:function(a,b,d){if(c.isWindow(this))this.onbeforeunload=d},teardown:function(a,b){if(this.onbeforeunload===b)this.onbeforeunload=null}}}};c.removeEvent=u.removeEventListener?function(a,b,d){a.removeEventListener&&a.removeEventListener(b,d,false)}:function(a,b,d){a.detachEvent&&a.detachEvent("on"+b,d)};c.Event=function(a){if(!this.preventDefault)return new c.Event(a);if(a&&a.type){this.originalEvent=a;this.type=a.type}else this.type=a;this.timeStamp=
-c.now();this[c.expando]=true};c.Event.prototype={preventDefault:function(){this.isDefaultPrevented=ba;var a=this.originalEvent;if(a)if(a.preventDefault)a.preventDefault();else a.returnValue=false},stopPropagation:function(){this.isPropagationStopped=ba;var a=this.originalEvent;if(a){a.stopPropagation&&a.stopPropagation();a.cancelBubble=true}},stopImmediatePropagation:function(){this.isImmediatePropagationStopped=ba;this.stopPropagation()},isDefaultPrevented:U,isPropagationStopped:U,isImmediatePropagationStopped:U};
-var ta=function(a){var b=a.relatedTarget;try{for(;b&&b!==this;)b=b.parentNode;if(b!==this){a.type=a.data;c.event.handle.apply(this,arguments)}}catch(d){}},ua=function(a){a.type=a.data;c.event.handle.apply(this,arguments)};c.each({mouseenter:"mouseover",mouseleave:"mouseout"},function(a,b){c.event.special[a]={setup:function(d){c.event.add(this,b,d&&d.selector?ua:ta,a)},teardown:function(d){c.event.remove(this,b,d&&d.selector?ua:ta)}}});if(!c.support.submitBubbles)c.event.special.submit={setup:function(){if(this.nodeName.toLowerCase()!==
-"form"){c.event.add(this,"click.specialSubmit",function(a){var b=a.target,d=b.type;if((d==="submit"||d==="image")&&c(b).closest("form").length){a.liveFired=A;return ja("submit",this,arguments)}});c.event.add(this,"keypress.specialSubmit",function(a){var b=a.target,d=b.type;if((d==="text"||d==="password")&&c(b).closest("form").length&&a.keyCode===13){a.liveFired=A;return ja("submit",this,arguments)}})}else return false},teardown:function(){c.event.remove(this,".specialSubmit")}};if(!c.support.changeBubbles){var V,
-va=function(a){var b=a.type,d=a.value;if(b==="radio"||b==="checkbox")d=a.checked;else if(b==="select-multiple")d=a.selectedIndex>-1?c.map(a.options,function(e){return e.selected}).join("-"):"";else if(a.nodeName.toLowerCase()==="select")d=a.selectedIndex;return d},Z=function(a,b){var d=a.target,e,f;if(!(!ha.test(d.nodeName)||d.readOnly)){e=c.data(d,"_change_data");f=va(d);if(a.type!=="focusout"||d.type!=="radio")c.data(d,"_change_data",f);if(!(e===A||f===e))if(e!=null||f){a.type="change";a.liveFired=
-A;return c.event.trigger(a,b,d)}}};c.event.special.change={filters:{focusout:Z,beforedeactivate:Z,click:function(a){var b=a.target,d=b.type;if(d==="radio"||d==="checkbox"||b.nodeName.toLowerCase()==="select")return Z.call(this,a)},keydown:function(a){var b=a.target,d=b.type;if(a.keyCode===13&&b.nodeName.toLowerCase()!=="textarea"||a.keyCode===32&&(d==="checkbox"||d==="radio")||d==="select-multiple")return Z.call(this,a)},beforeactivate:function(a){a=a.target;c.data(a,"_change_data",va(a))}},setup:function(){if(this.type===
-"file")return false;for(var a in V)c.event.add(this,a+".specialChange",V[a]);return ha.test(this.nodeName)},teardown:function(){c.event.remove(this,".specialChange");return ha.test(this.nodeName)}};V=c.event.special.change.filters;V.focus=V.beforeactivate}u.addEventListener&&c.each({focus:"focusin",blur:"focusout"},function(a,b){function d(e){e=c.event.fix(e);e.type=b;return c.event.trigger(e,null,e.target)}c.event.special[b]={setup:function(){sa[b]++===0&&u.addEventListener(a,d,true)},teardown:function(){--sa[b]===
-0&&u.removeEventListener(a,d,true)}}});c.each(["bind","one"],function(a,b){c.fn[b]=function(d,e,f){if(typeof d==="object"){for(var h in d)this[b](h,e,d[h],f);return this}if(c.isFunction(e)||e===false){f=e;e=A}var k=b==="one"?c.proxy(f,function(n){c(this).unbind(n,k);return f.apply(this,arguments)}):f;if(d==="unload"&&b!=="one")this.one(d,e,f);else{h=0;for(var l=this.length;h<l;h++)c.event.add(this[h],d,k,e)}return this}});c.fn.extend({unbind:function(a,b){if(typeof a==="object"&&!a.preventDefault)for(var d in a)this.unbind(d,
-a[d]);else{d=0;for(var e=this.length;d<e;d++)c.event.remove(this[d],a,b)}return this},delegate:function(a,b,d,e){return this.live(b,d,e,a)},undelegate:function(a,b,d){return arguments.length===0?this.unbind("live"):this.die(b,null,d,a)},trigger:function(a,b){return this.each(function(){c.event.trigger(a,b,this)})},triggerHandler:function(a,b){if(this[0]){var d=c.Event(a);d.preventDefault();d.stopPropagation();c.event.trigger(d,b,this[0]);return d.result}},toggle:function(a){for(var b=arguments,d=
-1;d<b.length;)c.proxy(a,b[d++]);return this.click(c.proxy(a,function(e){var f=(c.data(this,"lastToggle"+a.guid)||0)%d;c.data(this,"lastToggle"+a.guid,f+1);e.preventDefault();return b[f].apply(this,arguments)||false}))},hover:function(a,b){return this.mouseenter(a).mouseleave(b||a)}});var wa={focus:"focusin",blur:"focusout",mouseenter:"mouseover",mouseleave:"mouseout"};c.each(["live","die"],function(a,b){c.fn[b]=function(d,e,f,h){var k,l=0,n,s,v=h||this.selector;h=h?this:c(this.context);if(typeof d===
-"object"&&!d.preventDefault){for(k in d)h[b](k,e,d[k],v);return this}if(c.isFunction(e)){f=e;e=A}for(d=(d||"").split(" ");(k=d[l++])!=null;){n=X.exec(k);s="";if(n){s=n[0];k=k.replace(X,"")}if(k==="hover")d.push("mouseenter"+s,"mouseleave"+s);else{n=k;if(k==="focus"||k==="blur"){d.push(wa[k]+s);k+=s}else k=(wa[k]||k)+s;if(b==="live"){s=0;for(var B=h.length;s<B;s++)c.event.add(h[s],"live."+Y(k,v),{data:e,selector:v,handler:f,origType:k,origHandler:f,preType:n})}else h.unbind("live."+Y(k,v),f)}}return this}});
-c.each("blur focus focusin focusout load resize scroll unload click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup error".split(" "),function(a,b){c.fn[b]=function(d,e){if(e==null){e=d;d=null}return arguments.length>0?this.bind(b,d,e):this.trigger(b)};if(c.attrFn)c.attrFn[b]=true});E.attachEvent&&!E.addEventListener&&c(E).bind("unload",function(){for(var a in c.cache)if(c.cache[a].handle)try{c.event.remove(c.cache[a].handle.elem)}catch(b){}});
-(function(){function a(g,j,o,m,p,q){p=0;for(var t=m.length;p<t;p++){var x=m[p];if(x){x=x[g];for(var C=false;x;){if(x.sizcache===o){C=m[x.sizset];break}if(x.nodeType===1&&!q){x.sizcache=o;x.sizset=p}if(x.nodeName.toLowerCase()===j){C=x;break}x=x[g]}m[p]=C}}}function b(g,j,o,m,p,q){p=0;for(var t=m.length;p<t;p++){var x=m[p];if(x){x=x[g];for(var C=false;x;){if(x.sizcache===o){C=m[x.sizset];break}if(x.nodeType===1){if(!q){x.sizcache=o;x.sizset=p}if(typeof j!=="string"){if(x===j){C=true;break}}else if(l.filter(j,
-[x]).length>0){C=x;break}}x=x[g]}m[p]=C}}}var d=/((?:\((?:\([^()]+\)|[^()]+)+\)|\[(?:\[[^\[\]]*\]|['"][^'"]*['"]|[^\[\]'"]+)+\]|\\.|[^ >+~,(\[\\]+)+|[>+~])(\s*,\s*)?((?:.|\r|\n)*)/g,e=0,f=Object.prototype.toString,h=false,k=true;[0,0].sort(function(){k=false;return 0});var l=function(g,j,o,m){o=o||[];var p=j=j||u;if(j.nodeType!==1&&j.nodeType!==9)return[];if(!g||typeof g!=="string")return o;var q=[],t,x,C,P,N=true,R=l.isXML(j),Q=g,L;do{d.exec("");if(t=d.exec(Q)){Q=t[3];q.push(t[1]);if(t[2]){P=t[3];
-break}}}while(t);if(q.length>1&&s.exec(g))if(q.length===2&&n.relative[q[0]])x=M(q[0]+q[1],j);else for(x=n.relative[q[0]]?[j]:l(q.shift(),j);q.length;){g=q.shift();if(n.relative[g])g+=q.shift();x=M(g,x)}else{if(!m&&q.length>1&&j.nodeType===9&&!R&&n.match.ID.test(q[0])&&!n.match.ID.test(q[q.length-1])){t=l.find(q.shift(),j,R);j=t.expr?l.filter(t.expr,t.set)[0]:t.set[0]}if(j){t=m?{expr:q.pop(),set:D(m)}:l.find(q.pop(),q.length===1&&(q[0]==="~"||q[0]==="+")&&j.parentNode?j.parentNode:j,R);x=t.expr?l.filter(t.expr,
-t.set):t.set;if(q.length>0)C=D(x);else N=false;for(;q.length;){t=L=q.pop();if(n.relative[L])t=q.pop();else L="";if(t==null)t=j;n.relative[L](C,t,R)}}else C=[]}C||(C=x);C||l.error(L||g);if(f.call(C)==="[object Array]")if(N)if(j&&j.nodeType===1)for(g=0;C[g]!=null;g++){if(C[g]&&(C[g]===true||C[g].nodeType===1&&l.contains(j,C[g])))o.push(x[g])}else for(g=0;C[g]!=null;g++)C[g]&&C[g].nodeType===1&&o.push(x[g]);else o.push.apply(o,C);else D(C,o);if(P){l(P,p,o,m);l.uniqueSort(o)}return o};l.uniqueSort=function(g){if(w){h=
-k;g.sort(w);if(h)for(var j=1;j<g.length;j++)g[j]===g[j-1]&&g.splice(j--,1)}return g};l.matches=function(g,j){return l(g,null,null,j)};l.matchesSelector=function(g,j){return l(j,null,null,[g]).length>0};l.find=function(g,j,o){var m;if(!g)return[];for(var p=0,q=n.order.length;p<q;p++){var t=n.order[p],x;if(x=n.leftMatch[t].exec(g)){var C=x[1];x.splice(1,1);if(C.substr(C.length-1)!=="\\"){x[1]=(x[1]||"").replace(/\\/g,"");m=n.find[t](x,j,o);if(m!=null){g=g.replace(n.match[t],"");break}}}}m||(m=j.getElementsByTagName("*"));
-return{set:m,expr:g}};l.filter=function(g,j,o,m){for(var p=g,q=[],t=j,x,C,P=j&&j[0]&&l.isXML(j[0]);g&&j.length;){for(var N in n.filter)if((x=n.leftMatch[N].exec(g))!=null&&x[2]){var R=n.filter[N],Q,L;L=x[1];C=false;x.splice(1,1);if(L.substr(L.length-1)!=="\\"){if(t===q)q=[];if(n.preFilter[N])if(x=n.preFilter[N](x,t,o,q,m,P)){if(x===true)continue}else C=Q=true;if(x)for(var i=0;(L=t[i])!=null;i++)if(L){Q=R(L,x,i,t);var r=m^!!Q;if(o&&Q!=null)if(r)C=true;else t[i]=false;else if(r){q.push(L);C=true}}if(Q!==
-A){o||(t=q);g=g.replace(n.match[N],"");if(!C)return[];break}}}if(g===p)if(C==null)l.error(g);else break;p=g}return t};l.error=function(g){throw"Syntax error, unrecognized expression: "+g;};var n=l.selectors={order:["ID","NAME","TAG"],match:{ID:/#((?:[\w\u00c0-\uFFFF\-]|\\.)+)/,CLASS:/\.((?:[\w\u00c0-\uFFFF\-]|\\.)+)/,NAME:/\[name=['"]*((?:[\w\u00c0-\uFFFF\-]|\\.)+)['"]*\]/,ATTR:/\[\s*((?:[\w\u00c0-\uFFFF\-]|\\.)+)\s*(?:(\S?=)\s*(['"]*)(.*?)\3|)\s*\]/,TAG:/^((?:[\w\u00c0-\uFFFF\*\-]|\\.)+)/,CHILD:/:(only|nth|last|first)-child(?:\((even|odd|[\dn+\-]*)\))?/,
-POS:/:(nth|eq|gt|lt|first|last|even|odd)(?:\((\d*)\))?(?=[^\-]|$)/,PSEUDO:/:((?:[\w\u00c0-\uFFFF\-]|\\.)+)(?:\((['"]?)((?:\([^\)]+\)|[^\(\)]*)+)\2\))?/},leftMatch:{},attrMap:{"class":"className","for":"htmlFor"},attrHandle:{href:function(g){return g.getAttribute("href")}},relative:{"+":function(g,j){var o=typeof j==="string",m=o&&!/\W/.test(j);o=o&&!m;if(m)j=j.toLowerCase();m=0;for(var p=g.length,q;m<p;m++)if(q=g[m]){for(;(q=q.previousSibling)&&q.nodeType!==1;);g[m]=o||q&&q.nodeName.toLowerCase()===
-j?q||false:q===j}o&&l.filter(j,g,true)},">":function(g,j){var o=typeof j==="string",m,p=0,q=g.length;if(o&&!/\W/.test(j))for(j=j.toLowerCase();p<q;p++){if(m=g[p]){o=m.parentNode;g[p]=o.nodeName.toLowerCase()===j?o:false}}else{for(;p<q;p++)if(m=g[p])g[p]=o?m.parentNode:m.parentNode===j;o&&l.filter(j,g,true)}},"":function(g,j,o){var m=e++,p=b,q;if(typeof j==="string"&&!/\W/.test(j)){q=j=j.toLowerCase();p=a}p("parentNode",j,m,g,q,o)},"~":function(g,j,o){var m=e++,p=b,q;if(typeof j==="string"&&!/\W/.test(j)){q=
-j=j.toLowerCase();p=a}p("previousSibling",j,m,g,q,o)}},find:{ID:function(g,j,o){if(typeof j.getElementById!=="undefined"&&!o)return(g=j.getElementById(g[1]))&&g.parentNode?[g]:[]},NAME:function(g,j){if(typeof j.getElementsByName!=="undefined"){for(var o=[],m=j.getElementsByName(g[1]),p=0,q=m.length;p<q;p++)m[p].getAttribute("name")===g[1]&&o.push(m[p]);return o.length===0?null:o}},TAG:function(g,j){return j.getElementsByTagName(g[1])}},preFilter:{CLASS:function(g,j,o,m,p,q){g=" "+g[1].replace(/\\/g,
-"")+" ";if(q)return g;q=0;for(var t;(t=j[q])!=null;q++)if(t)if(p^(t.className&&(" "+t.className+" ").replace(/[\t\n]/g," ").indexOf(g)>=0))o||m.push(t);else if(o)j[q]=false;return false},ID:function(g){return g[1].replace(/\\/g,"")},TAG:function(g){return g[1].toLowerCase()},CHILD:function(g){if(g[1]==="nth"){var j=/(-?)(\d*)n((?:\+|-)?\d*)/.exec(g[2]==="even"&&"2n"||g[2]==="odd"&&"2n+1"||!/\D/.test(g[2])&&"0n+"+g[2]||g[2]);g[2]=j[1]+(j[2]||1)-0;g[3]=j[3]-0}g[0]=e++;return g},ATTR:function(g,j,o,
-m,p,q){j=g[1].replace(/\\/g,"");if(!q&&n.attrMap[j])g[1]=n.attrMap[j];if(g[2]==="~=")g[4]=" "+g[4]+" ";return g},PSEUDO:function(g,j,o,m,p){if(g[1]==="not")if((d.exec(g[3])||"").length>1||/^\w/.test(g[3]))g[3]=l(g[3],null,null,j);else{g=l.filter(g[3],j,o,true^p);o||m.push.apply(m,g);return false}else if(n.match.POS.test(g[0])||n.match.CHILD.test(g[0]))return true;return g},POS:function(g){g.unshift(true);return g}},filters:{enabled:function(g){return g.disabled===false&&g.type!=="hidden"},disabled:function(g){return g.disabled===
-true},checked:function(g){return g.checked===true},selected:function(g){return g.selected===true},parent:function(g){return!!g.firstChild},empty:function(g){return!g.firstChild},has:function(g,j,o){return!!l(o[3],g).length},header:function(g){return/h\d/i.test(g.nodeName)},text:function(g){return"text"===g.type},radio:function(g){return"radio"===g.type},checkbox:function(g){return"checkbox"===g.type},file:function(g){return"file"===g.type},password:function(g){return"password"===g.type},submit:function(g){return"submit"===
-g.type},image:function(g){return"image"===g.type},reset:function(g){return"reset"===g.type},button:function(g){return"button"===g.type||g.nodeName.toLowerCase()==="button"},input:function(g){return/input|select|textarea|button/i.test(g.nodeName)}},setFilters:{first:function(g,j){return j===0},last:function(g,j,o,m){return j===m.length-1},even:function(g,j){return j%2===0},odd:function(g,j){return j%2===1},lt:function(g,j,o){return j<o[3]-0},gt:function(g,j,o){return j>o[3]-0},nth:function(g,j,o){return o[3]-
-0===j},eq:function(g,j,o){return o[3]-0===j}},filter:{PSEUDO:function(g,j,o,m){var p=j[1],q=n.filters[p];if(q)return q(g,o,j,m);else if(p==="contains")return(g.textContent||g.innerText||l.getText([g])||"").indexOf(j[3])>=0;else if(p==="not"){j=j[3];o=0;for(m=j.length;o<m;o++)if(j[o]===g)return false;return true}else l.error("Syntax error, unrecognized expression: "+p)},CHILD:function(g,j){var o=j[1],m=g;switch(o){case "only":case "first":for(;m=m.previousSibling;)if(m.nodeType===1)return false;if(o===
-"first")return true;m=g;case "last":for(;m=m.nextSibling;)if(m.nodeType===1)return false;return true;case "nth":o=j[2];var p=j[3];if(o===1&&p===0)return true;var q=j[0],t=g.parentNode;if(t&&(t.sizcache!==q||!g.nodeIndex)){var x=0;for(m=t.firstChild;m;m=m.nextSibling)if(m.nodeType===1)m.nodeIndex=++x;t.sizcache=q}m=g.nodeIndex-p;return o===0?m===0:m%o===0&&m/o>=0}},ID:function(g,j){return g.nodeType===1&&g.getAttribute("id")===j},TAG:function(g,j){return j==="*"&&g.nodeType===1||g.nodeName.toLowerCase()===
-j},CLASS:function(g,j){return(" "+(g.className||g.getAttribute("class"))+" ").indexOf(j)>-1},ATTR:function(g,j){var o=j[1];o=n.attrHandle[o]?n.attrHandle[o](g):g[o]!=null?g[o]:g.getAttribute(o);var m=o+"",p=j[2],q=j[4];return o==null?p==="!=":p==="="?m===q:p==="*="?m.indexOf(q)>=0:p==="~="?(" "+m+" ").indexOf(q)>=0:!q?m&&o!==false:p==="!="?m!==q:p==="^="?m.indexOf(q)===0:p==="$="?m.substr(m.length-q.length)===q:p==="|="?m===q||m.substr(0,q.length+1)===q+"-":false},POS:function(g,j,o,m){var p=n.setFilters[j[2]];
-if(p)return p(g,o,j,m)}}},s=n.match.POS,v=function(g,j){return"\\"+(j-0+1)},B;for(B in n.match){n.match[B]=RegExp(n.match[B].source+/(?![^\[]*\])(?![^\(]*\))/.source);n.leftMatch[B]=RegExp(/(^(?:.|\r|\n)*?)/.source+n.match[B].source.replace(/\\(\d+)/g,v))}var D=function(g,j){g=Array.prototype.slice.call(g,0);if(j){j.push.apply(j,g);return j}return g};try{Array.prototype.slice.call(u.documentElement.childNodes,0)}catch(H){D=function(g,j){var o=j||[],m=0;if(f.call(g)==="[object Array]")Array.prototype.push.apply(o,
-g);else if(typeof g.length==="number")for(var p=g.length;m<p;m++)o.push(g[m]);else for(;g[m];m++)o.push(g[m]);return o}}var w,G;if(u.documentElement.compareDocumentPosition)w=function(g,j){if(g===j){h=true;return 0}if(!g.compareDocumentPosition||!j.compareDocumentPosition)return g.compareDocumentPosition?-1:1;return g.compareDocumentPosition(j)&4?-1:1};else{w=function(g,j){var o=[],m=[],p=g.parentNode,q=j.parentNode,t=p;if(g===j){h=true;return 0}else if(p===q)return G(g,j);else if(p){if(!q)return 1}else return-1;
-for(;t;){o.unshift(t);t=t.parentNode}for(t=q;t;){m.unshift(t);t=t.parentNode}p=o.length;q=m.length;for(t=0;t<p&&t<q;t++)if(o[t]!==m[t])return G(o[t],m[t]);return t===p?G(g,m[t],-1):G(o[t],j,1)};G=function(g,j,o){if(g===j)return o;for(g=g.nextSibling;g;){if(g===j)return-1;g=g.nextSibling}return 1}}l.getText=function(g){for(var j="",o,m=0;g[m];m++){o=g[m];if(o.nodeType===3||o.nodeType===4)j+=o.nodeValue;else if(o.nodeType!==8)j+=l.getText(o.childNodes)}return j};(function(){var g=u.createElement("div"),
-j="script"+(new Date).getTime();g.innerHTML="<a name='"+j+"'/>";var o=u.documentElement;o.insertBefore(g,o.firstChild);if(u.getElementById(j)){n.find.ID=function(m,p,q){if(typeof p.getElementById!=="undefined"&&!q)return(p=p.getElementById(m[1]))?p.id===m[1]||typeof p.getAttributeNode!=="undefined"&&p.getAttributeNode("id").nodeValue===m[1]?[p]:A:[]};n.filter.ID=function(m,p){var q=typeof m.getAttributeNode!=="undefined"&&m.getAttributeNode("id");return m.nodeType===1&&q&&q.nodeValue===p}}o.removeChild(g);
-o=g=null})();(function(){var g=u.createElement("div");g.appendChild(u.createComment(""));if(g.getElementsByTagName("*").length>0)n.find.TAG=function(j,o){var m=o.getElementsByTagName(j[1]);if(j[1]==="*"){for(var p=[],q=0;m[q];q++)m[q].nodeType===1&&p.push(m[q]);m=p}return m};g.innerHTML="<a href='#'></a>";if(g.firstChild&&typeof g.firstChild.getAttribute!=="undefined"&&g.firstChild.getAttribute("href")!=="#")n.attrHandle.href=function(j){return j.getAttribute("href",2)};g=null})();u.querySelectorAll&&
-function(){var g=l,j=u.createElement("div");j.innerHTML="<p class='TEST'></p>";if(!(j.querySelectorAll&&j.querySelectorAll(".TEST").length===0)){l=function(m,p,q,t){p=p||u;if(!t&&!l.isXML(p))if(p.nodeType===9)try{return D(p.querySelectorAll(m),q)}catch(x){}else if(p.nodeType===1&&p.nodeName.toLowerCase()!=="object"){var C=p.id,P=p.id="__sizzle__";try{return D(p.querySelectorAll("#"+P+" "+m),q)}catch(N){}finally{if(C)p.id=C;else p.removeAttribute("id")}}return g(m,p,q,t)};for(var o in g)l[o]=g[o];
-j=null}}();(function(){var g=u.documentElement,j=g.matchesSelector||g.mozMatchesSelector||g.webkitMatchesSelector||g.msMatchesSelector,o=false;try{j.call(u.documentElement,":sizzle")}catch(m){o=true}if(j)l.matchesSelector=function(p,q){try{if(o||!n.match.PSEUDO.test(q))return j.call(p,q)}catch(t){}return l(q,null,null,[p]).length>0}})();(function(){var g=u.createElement("div");g.innerHTML="<div class='test e'></div><div class='test'></div>";if(!(!g.getElementsByClassName||g.getElementsByClassName("e").length===
-0)){g.lastChild.className="e";if(g.getElementsByClassName("e").length!==1){n.order.splice(1,0,"CLASS");n.find.CLASS=function(j,o,m){if(typeof o.getElementsByClassName!=="undefined"&&!m)return o.getElementsByClassName(j[1])};g=null}}})();l.contains=u.documentElement.contains?function(g,j){return g!==j&&(g.contains?g.contains(j):true)}:function(g,j){return!!(g.compareDocumentPosition(j)&16)};l.isXML=function(g){return(g=(g?g.ownerDocument||g:0).documentElement)?g.nodeName!=="HTML":false};var M=function(g,
-j){for(var o=[],m="",p,q=j.nodeType?[j]:j;p=n.match.PSEUDO.exec(g);){m+=p[0];g=g.replace(n.match.PSEUDO,"")}g=n.relative[g]?g+"*":g;p=0;for(var t=q.length;p<t;p++)l(g,q[p],o);return l.filter(m,o)};c.find=l;c.expr=l.selectors;c.expr[":"]=c.expr.filters;c.unique=l.uniqueSort;c.text=l.getText;c.isXMLDoc=l.isXML;c.contains=l.contains})();var Wa=/Until$/,Xa=/^(?:parents|prevUntil|prevAll)/,Ya=/,/,Ja=/^.[^:#\[\.,]*$/,Za=Array.prototype.slice,$a=c.expr.match.POS;c.fn.extend({find:function(a){for(var b=this.pushStack("",
-"find",a),d=0,e=0,f=this.length;e<f;e++){d=b.length;c.find(a,this[e],b);if(e>0)for(var h=d;h<b.length;h++)for(var k=0;k<d;k++)if(b[k]===b[h]){b.splice(h--,1);break}}return b},has:function(a){var b=c(a);return this.filter(function(){for(var d=0,e=b.length;d<e;d++)if(c.contains(this,b[d]))return true})},not:function(a){return this.pushStack(ka(this,a,false),"not",a)},filter:function(a){return this.pushStack(ka(this,a,true),"filter",a)},is:function(a){return!!a&&c.filter(a,this).length>0},closest:function(a,
-b){var d=[],e,f,h=this[0];if(c.isArray(a)){var k={},l,n=1;if(h&&a.length){e=0;for(f=a.length;e<f;e++){l=a[e];k[l]||(k[l]=c.expr.match.POS.test(l)?c(l,b||this.context):l)}for(;h&&h.ownerDocument&&h!==b;){for(l in k){e=k[l];if(e.jquery?e.index(h)>-1:c(h).is(e))d.push({selector:l,elem:h,level:n})}h=h.parentNode;n++}}return d}k=$a.test(a)?c(a,b||this.context):null;e=0;for(f=this.length;e<f;e++)for(h=this[e];h;)if(k?k.index(h)>-1:c.find.matchesSelector(h,a)){d.push(h);break}else{h=h.parentNode;if(!h||
-!h.ownerDocument||h===b)break}d=d.length>1?c.unique(d):d;return this.pushStack(d,"closest",a)},index:function(a){if(!a||typeof a==="string")return c.inArray(this[0],a?c(a):this.parent().children());return c.inArray(a.jquery?a[0]:a,this)},add:function(a,b){var d=typeof a==="string"?c(a,b||this.context):c.makeArray(a),e=c.merge(this.get(),d);return this.pushStack(!d[0]||!d[0].parentNode||d[0].parentNode.nodeType===11||!e[0]||!e[0].parentNode||e[0].parentNode.nodeType===11?e:c.unique(e))},andSelf:function(){return this.add(this.prevObject)}});
-c.each({parent:function(a){return(a=a.parentNode)&&a.nodeType!==11?a:null},parents:function(a){return c.dir(a,"parentNode")},parentsUntil:function(a,b,d){return c.dir(a,"parentNode",d)},next:function(a){return c.nth(a,2,"nextSibling")},prev:function(a){return c.nth(a,2,"previousSibling")},nextAll:function(a){return c.dir(a,"nextSibling")},prevAll:function(a){return c.dir(a,"previousSibling")},nextUntil:function(a,b,d){return c.dir(a,"nextSibling",d)},prevUntil:function(a,b,d){return c.dir(a,"previousSibling",
-d)},siblings:function(a){return c.sibling(a.parentNode.firstChild,a)},children:function(a){return c.sibling(a.firstChild)},contents:function(a){return c.nodeName(a,"iframe")?a.contentDocument||a.contentWindow.document:c.makeArray(a.childNodes)}},function(a,b){c.fn[a]=function(d,e){var f=c.map(this,b,d);Wa.test(a)||(e=d);if(e&&typeof e==="string")f=c.filter(e,f);f=this.length>1?c.unique(f):f;if((this.length>1||Ya.test(e))&&Xa.test(a))f=f.reverse();return this.pushStack(f,a,Za.call(arguments).join(","))}});
-c.extend({filter:function(a,b,d){if(d)a=":not("+a+")";return b.length===1?c.find.matchesSelector(b[0],a)?[b[0]]:[]:c.find.matches(a,b)},dir:function(a,b,d){var e=[];for(a=a[b];a&&a.nodeType!==9&&(d===A||a.nodeType!==1||!c(a).is(d));){a.nodeType===1&&e.push(a);a=a[b]}return e},nth:function(a,b,d){b=b||1;for(var e=0;a;a=a[d])if(a.nodeType===1&&++e===b)break;return a},sibling:function(a,b){for(var d=[];a;a=a.nextSibling)a.nodeType===1&&a!==b&&d.push(a);return d}});var xa=/ jQuery\d+="(?:\d+|null)"/g,
-$=/^\s+/,ya=/<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\w:]+)[^>]*)\/>/ig,za=/<([\w:]+)/,ab=/<tbody/i,bb=/<|&#?\w+;/,Aa=/<(?:script|object|embed|option|style)/i,Ba=/checked\s*(?:[^=]|=\s*.checked.)/i,cb=/\=([^="'>\s]+\/)>/g,O={option:[1,"<select multiple='multiple'>","</select>"],legend:[1,"<fieldset>","</fieldset>"],thead:[1,"<table>","</table>"],tr:[2,"<table><tbody>","</tbody></table>"],td:[3,"<table><tbody><tr>","</tr></tbody></table>"],col:[2,"<table><tbody></tbody><colgroup>","</colgroup></table>"],
-area:[1,"<map>","</map>"],_default:[0,"",""]};O.optgroup=O.option;O.tbody=O.tfoot=O.colgroup=O.caption=O.thead;O.th=O.td;if(!c.support.htmlSerialize)O._default=[1,"div<div>","</div>"];c.fn.extend({text:function(a){if(c.isFunction(a))return this.each(function(b){var d=c(this);d.text(a.call(this,b,d.text()))});if(typeof a!=="object"&&a!==A)return this.empty().append((this[0]&&this[0].ownerDocument||u).createTextNode(a));return c.text(this)},wrapAll:function(a){if(c.isFunction(a))return this.each(function(d){c(this).wrapAll(a.call(this,
-d))});if(this[0]){var b=c(a,this[0].ownerDocument).eq(0).clone(true);this[0].parentNode&&b.insertBefore(this[0]);b.map(function(){for(var d=this;d.firstChild&&d.firstChild.nodeType===1;)d=d.firstChild;return d}).append(this)}return this},wrapInner:function(a){if(c.isFunction(a))return this.each(function(b){c(this).wrapInner(a.call(this,b))});return this.each(function(){var b=c(this),d=b.contents();d.length?d.wrapAll(a):b.append(a)})},wrap:function(a){return this.each(function(){c(this).wrapAll(a)})},
-unwrap:function(){return this.parent().each(function(){c.nodeName(this,"body")||c(this).replaceWith(this.childNodes)}).end()},append:function(){return this.domManip(arguments,true,function(a){this.nodeType===1&&this.appendChild(a)})},prepend:function(){return this.domManip(arguments,true,function(a){this.nodeType===1&&this.insertBefore(a,this.firstChild)})},before:function(){if(this[0]&&this[0].parentNode)return this.domManip(arguments,false,function(b){this.parentNode.insertBefore(b,this)});else if(arguments.length){var a=
-c(arguments[0]);a.push.apply(a,this.toArray());return this.pushStack(a,"before",arguments)}},after:function(){if(this[0]&&this[0].parentNode)return this.domManip(arguments,false,function(b){this.parentNode.insertBefore(b,this.nextSibling)});else if(arguments.length){var a=this.pushStack(this,"after",arguments);a.push.apply(a,c(arguments[0]).toArray());return a}},remove:function(a,b){for(var d=0,e;(e=this[d])!=null;d++)if(!a||c.filter(a,[e]).length){if(!b&&e.nodeType===1){c.cleanData(e.getElementsByTagName("*"));
-c.cleanData([e])}e.parentNode&&e.parentNode.removeChild(e)}return this},empty:function(){for(var a=0,b;(b=this[a])!=null;a++)for(b.nodeType===1&&c.cleanData(b.getElementsByTagName("*"));b.firstChild;)b.removeChild(b.firstChild);return this},clone:function(a){var b=this.map(function(){if(!c.support.noCloneEvent&&!c.isXMLDoc(this)){var d=this.outerHTML,e=this.ownerDocument;if(!d){d=e.createElement("div");d.appendChild(this.cloneNode(true));d=d.innerHTML}return c.clean([d.replace(xa,"").replace(cb,'="$1">').replace($,
-"")],e)[0]}else return this.cloneNode(true)});if(a===true){la(this,b);la(this.find("*"),b.find("*"))}return b},html:function(a){if(a===A)return this[0]&&this[0].nodeType===1?this[0].innerHTML.replace(xa,""):null;else if(typeof a==="string"&&!Aa.test(a)&&(c.support.leadingWhitespace||!$.test(a))&&!O[(za.exec(a)||["",""])[1].toLowerCase()]){a=a.replace(ya,"<$1></$2>");try{for(var b=0,d=this.length;b<d;b++)if(this[b].nodeType===1){c.cleanData(this[b].getElementsByTagName("*"));this[b].innerHTML=a}}catch(e){this.empty().append(a)}}else c.isFunction(a)?
-this.each(function(f){var h=c(this);h.html(a.call(this,f,h.html()))}):this.empty().append(a);return this},replaceWith:function(a){if(this[0]&&this[0].parentNode){if(c.isFunction(a))return this.each(function(b){var d=c(this),e=d.html();d.replaceWith(a.call(this,b,e))});if(typeof a!=="string")a=c(a).detach();return this.each(function(){var b=this.nextSibling,d=this.parentNode;c(this).remove();b?c(b).before(a):c(d).append(a)})}else return this.pushStack(c(c.isFunction(a)?a():a),"replaceWith",a)},detach:function(a){return this.remove(a,
-true)},domManip:function(a,b,d){var e,f,h=a[0],k=[],l;if(!c.support.checkClone&&arguments.length===3&&typeof h==="string"&&Ba.test(h))return this.each(function(){c(this).domManip(a,b,d,true)});if(c.isFunction(h))return this.each(function(s){var v=c(this);a[0]=h.call(this,s,b?v.html():A);v.domManip(a,b,d)});if(this[0]){e=h&&h.parentNode;e=c.support.parentNode&&e&&e.nodeType===11&&e.childNodes.length===this.length?{fragment:e}:c.buildFragment(a,this,k);l=e.fragment;if(f=l.childNodes.length===1?l=l.firstChild:
-l.firstChild){b=b&&c.nodeName(f,"tr");f=0;for(var n=this.length;f<n;f++)d.call(b?c.nodeName(this[f],"table")?this[f].getElementsByTagName("tbody")[0]||this[f].appendChild(this[f].ownerDocument.createElement("tbody")):this[f]:this[f],f>0||e.cacheable||this.length>1?l.cloneNode(true):l)}k.length&&c.each(k,Ka)}return this}});c.buildFragment=function(a,b,d){var e,f,h;b=b&&b[0]?b[0].ownerDocument||b[0]:u;if(a.length===1&&typeof a[0]==="string"&&a[0].length<512&&b===u&&!Aa.test(a[0])&&(c.support.checkClone||
-!Ba.test(a[0]))){f=true;if(h=c.fragments[a[0]])if(h!==1)e=h}if(!e){e=b.createDocumentFragment();c.clean(a,b,e,d)}if(f)c.fragments[a[0]]=h?e:1;return{fragment:e,cacheable:f}};c.fragments={};c.each({appendTo:"append",prependTo:"prepend",insertBefore:"before",insertAfter:"after",replaceAll:"replaceWith"},function(a,b){c.fn[a]=function(d){var e=[];d=c(d);var f=this.length===1&&this[0].parentNode;if(f&&f.nodeType===11&&f.childNodes.length===1&&d.length===1){d[b](this[0]);return this}else{f=0;for(var h=
-d.length;f<h;f++){var k=(f>0?this.clone(true):this).get();c(d[f])[b](k);e=e.concat(k)}return this.pushStack(e,a,d.selector)}}});c.extend({clean:function(a,b,d,e){b=b||u;if(typeof b.createElement==="undefined")b=b.ownerDocument||b[0]&&b[0].ownerDocument||u;for(var f=[],h=0,k;(k=a[h])!=null;h++){if(typeof k==="number")k+="";if(k){if(typeof k==="string"&&!bb.test(k))k=b.createTextNode(k);else if(typeof k==="string"){k=k.replace(ya,"<$1></$2>");var l=(za.exec(k)||["",""])[1].toLowerCase(),n=O[l]||O._default,
-s=n[0],v=b.createElement("div");for(v.innerHTML=n[1]+k+n[2];s--;)v=v.lastChild;if(!c.support.tbody){s=ab.test(k);l=l==="table"&&!s?v.firstChild&&v.firstChild.childNodes:n[1]==="<table>"&&!s?v.childNodes:[];for(n=l.length-1;n>=0;--n)c.nodeName(l[n],"tbody")&&!l[n].childNodes.length&&l[n].parentNode.removeChild(l[n])}!c.support.leadingWhitespace&&$.test(k)&&v.insertBefore(b.createTextNode($.exec(k)[0]),v.firstChild);k=v.childNodes}if(k.nodeType)f.push(k);else f=c.merge(f,k)}}if(d)for(h=0;f[h];h++)if(e&&
-c.nodeName(f[h],"script")&&(!f[h].type||f[h].type.toLowerCase()==="text/javascript"))e.push(f[h].parentNode?f[h].parentNode.removeChild(f[h]):f[h]);else{f[h].nodeType===1&&f.splice.apply(f,[h+1,0].concat(c.makeArray(f[h].getElementsByTagName("script"))));d.appendChild(f[h])}return f},cleanData:function(a){for(var b,d,e=c.cache,f=c.event.special,h=c.support.deleteExpando,k=0,l;(l=a[k])!=null;k++)if(!(l.nodeName&&c.noData[l.nodeName.toLowerCase()]))if(d=l[c.expando]){if((b=e[d])&&b.events)for(var n in b.events)f[n]?
-c.event.remove(l,n):c.removeEvent(l,n,b.handle);if(h)delete l[c.expando];else l.removeAttribute&&l.removeAttribute(c.expando);delete e[d]}}});var Ca=/alpha\([^)]*\)/i,db=/opacity=([^)]*)/,eb=/-([a-z])/ig,fb=/([A-Z])/g,Da=/^-?\d+(?:px)?$/i,gb=/^-?\d/,hb={position:"absolute",visibility:"hidden",display:"block"},La=["Left","Right"],Ma=["Top","Bottom"],W,ib=u.defaultView&&u.defaultView.getComputedStyle,jb=function(a,b){return b.toUpperCase()};c.fn.css=function(a,b){if(arguments.length===2&&b===A)return this;
-return c.access(this,a,b,true,function(d,e,f){return f!==A?c.style(d,e,f):c.css(d,e)})};c.extend({cssHooks:{opacity:{get:function(a,b){if(b){var d=W(a,"opacity","opacity");return d===""?"1":d}else return a.style.opacity}}},cssNumber:{zIndex:true,fontWeight:true,opacity:true,zoom:true,lineHeight:true},cssProps:{"float":c.support.cssFloat?"cssFloat":"styleFloat"},style:function(a,b,d,e){if(!(!a||a.nodeType===3||a.nodeType===8||!a.style)){var f,h=c.camelCase(b),k=a.style,l=c.cssHooks[h];b=c.cssProps[h]||
-h;if(d!==A){if(!(typeof d==="number"&&isNaN(d)||d==null)){if(typeof d==="number"&&!c.cssNumber[h])d+="px";if(!l||!("set"in l)||(d=l.set(a,d))!==A)try{k[b]=d}catch(n){}}}else{if(l&&"get"in l&&(f=l.get(a,false,e))!==A)return f;return k[b]}}},css:function(a,b,d){var e,f=c.camelCase(b),h=c.cssHooks[f];b=c.cssProps[f]||f;if(h&&"get"in h&&(e=h.get(a,true,d))!==A)return e;else if(W)return W(a,b,f)},swap:function(a,b,d){var e={},f;for(f in b){e[f]=a.style[f];a.style[f]=b[f]}d.call(a);for(f in b)a.style[f]=
-e[f]},camelCase:function(a){return a.replace(eb,jb)}});c.curCSS=c.css;c.each(["height","width"],function(a,b){c.cssHooks[b]={get:function(d,e,f){var h;if(e){if(d.offsetWidth!==0)h=ma(d,b,f);else c.swap(d,hb,function(){h=ma(d,b,f)});return h+"px"}},set:function(d,e){if(Da.test(e)){e=parseFloat(e);if(e>=0)return e+"px"}else return e}}});if(!c.support.opacity)c.cssHooks.opacity={get:function(a,b){return db.test((b&&a.currentStyle?a.currentStyle.filter:a.style.filter)||"")?parseFloat(RegExp.$1)/100+"":
-b?"1":""},set:function(a,b){var d=a.style;d.zoom=1;var e=c.isNaN(b)?"":"alpha(opacity="+b*100+")",f=d.filter||"";d.filter=Ca.test(f)?f.replace(Ca,e):d.filter+" "+e}};if(ib)W=function(a,b,d){var e;d=d.replace(fb,"-$1").toLowerCase();if(!(b=a.ownerDocument.defaultView))return A;if(b=b.getComputedStyle(a,null)){e=b.getPropertyValue(d);if(e===""&&!c.contains(a.ownerDocument.documentElement,a))e=c.style(a,d)}return e};else if(u.documentElement.currentStyle)W=function(a,b){var d,e,f=a.currentStyle&&a.currentStyle[b],
-h=a.style;if(!Da.test(f)&&gb.test(f)){d=h.left;e=a.runtimeStyle.left;a.runtimeStyle.left=a.currentStyle.left;h.left=b==="fontSize"?"1em":f||0;f=h.pixelLeft+"px";h.left=d;a.runtimeStyle.left=e}return f};if(c.expr&&c.expr.filters){c.expr.filters.hidden=function(a){var b=a.offsetHeight;return a.offsetWidth===0&&b===0||!c.support.reliableHiddenOffsets&&(a.style.display||c.css(a,"display"))==="none"};c.expr.filters.visible=function(a){return!c.expr.filters.hidden(a)}}var kb=c.now(),lb=/<script\b[^<]*(?:(?!<\/script>)<[^<]*)*<\/script>/gi,
-mb=/^(?:select|textarea)/i,nb=/^(?:color|date|datetime|email|hidden|month|number|password|range|search|tel|text|time|url|week)$/i,ob=/^(?:GET|HEAD|DELETE)$/,Na=/\[\]$/,T=/\=\?(&|$)/,ia=/\?/,pb=/([?&])_=[^&]*/,qb=/^(\w+:)?\/\/([^\/?#]+)/,rb=/%20/g,sb=/#.*$/,Ea=c.fn.load;c.fn.extend({load:function(a,b,d){if(typeof a!=="string"&&Ea)return Ea.apply(this,arguments);else if(!this.length)return this;var e=a.indexOf(" ");if(e>=0){var f=a.slice(e,a.length);a=a.slice(0,e)}e="GET";if(b)if(c.isFunction(b)){d=
-b;b=null}else if(typeof b==="object"){b=c.param(b,c.ajaxSettings.traditional);e="POST"}var h=this;c.ajax({url:a,type:e,dataType:"html",data:b,complete:function(k,l){if(l==="success"||l==="notmodified")h.html(f?c("<div>").append(k.responseText.replace(lb,"")).find(f):k.responseText);d&&h.each(d,[k.responseText,l,k])}});return this},serialize:function(){return c.param(this.serializeArray())},serializeArray:function(){return this.map(function(){return this.elements?c.makeArray(this.elements):this}).filter(function(){return this.name&&
-!this.disabled&&(this.checked||mb.test(this.nodeName)||nb.test(this.type))}).map(function(a,b){var d=c(this).val();return d==null?null:c.isArray(d)?c.map(d,function(e){return{name:b.name,value:e}}):{name:b.name,value:d}}).get()}});c.each("ajaxStart ajaxStop ajaxComplete ajaxError ajaxSuccess ajaxSend".split(" "),function(a,b){c.fn[b]=function(d){return this.bind(b,d)}});c.extend({get:function(a,b,d,e){if(c.isFunction(b)){e=e||d;d=b;b=null}return c.ajax({type:"GET",url:a,data:b,success:d,dataType:e})},
-getScript:function(a,b){return c.get(a,null,b,"script")},getJSON:function(a,b,d){return c.get(a,b,d,"json")},post:function(a,b,d,e){if(c.isFunction(b)){e=e||d;d=b;b={}}return c.ajax({type:"POST",url:a,data:b,success:d,dataType:e})},ajaxSetup:function(a){c.extend(c.ajaxSettings,a)},ajaxSettings:{url:location.href,global:true,type:"GET",contentType:"application/x-www-form-urlencoded",processData:true,async:true,xhr:function(){return new E.XMLHttpRequest},accepts:{xml:"application/xml, text/xml",html:"text/html",
-script:"text/javascript, application/javascript",json:"application/json, text/javascript",text:"text/plain",_default:"*/*"}},ajax:function(a){var b=c.extend(true,{},c.ajaxSettings,a),d,e,f,h=b.type.toUpperCase(),k=ob.test(h);b.url=b.url.replace(sb,"");b.context=a&&a.context!=null?a.context:b;if(b.data&&b.processData&&typeof b.data!=="string")b.data=c.param(b.data,b.traditional);if(b.dataType==="jsonp"){if(h==="GET")T.test(b.url)||(b.url+=(ia.test(b.url)?"&":"?")+(b.jsonp||"callback")+"=?");else if(!b.data||
-!T.test(b.data))b.data=(b.data?b.data+"&":"")+(b.jsonp||"callback")+"=?";b.dataType="json"}if(b.dataType==="json"&&(b.data&&T.test(b.data)||T.test(b.url))){d=b.jsonpCallback||"jsonp"+kb++;if(b.data)b.data=(b.data+"").replace(T,"="+d+"$1");b.url=b.url.replace(T,"="+d+"$1");b.dataType="script";var l=E[d];E[d]=function(m){f=m;c.handleSuccess(b,w,e,f);c.handleComplete(b,w,e,f);if(c.isFunction(l))l(m);else{E[d]=A;try{delete E[d]}catch(p){}}v&&v.removeChild(B)}}if(b.dataType==="script"&&b.cache===null)b.cache=
-false;if(b.cache===false&&h==="GET"){var n=c.now(),s=b.url.replace(pb,"$1_="+n);b.url=s+(s===b.url?(ia.test(b.url)?"&":"?")+"_="+n:"")}if(b.data&&h==="GET")b.url+=(ia.test(b.url)?"&":"?")+b.data;b.global&&c.active++===0&&c.event.trigger("ajaxStart");n=(n=qb.exec(b.url))&&(n[1]&&n[1]!==location.protocol||n[2]!==location.host);if(b.dataType==="script"&&h==="GET"&&n){var v=u.getElementsByTagName("head")[0]||u.documentElement,B=u.createElement("script");if(b.scriptCharset)B.charset=b.scriptCharset;B.src=
-b.url;if(!d){var D=false;B.onload=B.onreadystatechange=function(){if(!D&&(!this.readyState||this.readyState==="loaded"||this.readyState==="complete")){D=true;c.handleSuccess(b,w,e,f);c.handleComplete(b,w,e,f);B.onload=B.onreadystatechange=null;v&&B.parentNode&&v.removeChild(B)}}}v.insertBefore(B,v.firstChild);return A}var H=false,w=b.xhr();if(w){b.username?w.open(h,b.url,b.async,b.username,b.password):w.open(h,b.url,b.async);try{if(b.data!=null&&!k||a&&a.contentType)w.setRequestHeader("Content-Type",
-b.contentType);if(b.ifModified){c.lastModified[b.url]&&w.setRequestHeader("If-Modified-Since",c.lastModified[b.url]);c.etag[b.url]&&w.setRequestHeader("If-None-Match",c.etag[b.url])}n||w.setRequestHeader("X-Requested-With","XMLHttpRequest");w.setRequestHeader("Accept",b.dataType&&b.accepts[b.dataType]?b.accepts[b.dataType]+", */*; q=0.01":b.accepts._default)}catch(G){}if(b.beforeSend&&b.beforeSend.call(b.context,w,b)===false){b.global&&c.active--===1&&c.event.trigger("ajaxStop");w.abort();return false}b.global&&
-c.triggerGlobal(b,"ajaxSend",[w,b]);var M=w.onreadystatechange=function(m){if(!w||w.readyState===0||m==="abort"){H||c.handleComplete(b,w,e,f);H=true;if(w)w.onreadystatechange=c.noop}else if(!H&&w&&(w.readyState===4||m==="timeout")){H=true;w.onreadystatechange=c.noop;e=m==="timeout"?"timeout":!c.httpSuccess(w)?"error":b.ifModified&&c.httpNotModified(w,b.url)?"notmodified":"success";var p;if(e==="success")try{f=c.httpData(w,b.dataType,b)}catch(q){e="parsererror";p=q}if(e==="success"||e==="notmodified")d||
-c.handleSuccess(b,w,e,f);else c.handleError(b,w,e,p);d||c.handleComplete(b,w,e,f);m==="timeout"&&w.abort();if(b.async)w=null}};try{var g=w.abort;w.abort=function(){w&&g.call&&g.call(w);M("abort")}}catch(j){}b.async&&b.timeout>0&&setTimeout(function(){w&&!H&&M("timeout")},b.timeout);try{w.send(k||b.data==null?null:b.data)}catch(o){c.handleError(b,w,null,o);c.handleComplete(b,w,e,f)}b.async||M();return w}},param:function(a,b){var d=[],e=function(h,k){k=c.isFunction(k)?k():k;d[d.length]=encodeURIComponent(h)+
-"="+encodeURIComponent(k)};if(b===A)b=c.ajaxSettings.traditional;if(c.isArray(a)||a.jquery)c.each(a,function(){e(this.name,this.value)});else for(var f in a)ca(f,a[f],b,e);return d.join("&").replace(rb,"+")}});c.extend({active:0,lastModified:{},etag:{},handleError:function(a,b,d,e){a.error&&a.error.call(a.context,b,d,e);a.global&&c.triggerGlobal(a,"ajaxError",[b,a,e])},handleSuccess:function(a,b,d,e){a.success&&a.success.call(a.context,e,d,b);a.global&&c.triggerGlobal(a,"ajaxSuccess",[b,a])},handleComplete:function(a,
-b,d){a.complete&&a.complete.call(a.context,b,d);a.global&&c.triggerGlobal(a,"ajaxComplete",[b,a]);a.global&&c.active--===1&&c.event.trigger("ajaxStop")},triggerGlobal:function(a,b,d){(a.context&&a.context.url==null?c(a.context):c.event).trigger(b,d)},httpSuccess:function(a){try{return!a.status&&location.protocol==="file:"||a.status>=200&&a.status<300||a.status===304||a.status===1223}catch(b){}return false},httpNotModified:function(a,b){var d=a.getResponseHeader("Last-Modified"),e=a.getResponseHeader("Etag");
-if(d)c.lastModified[b]=d;if(e)c.etag[b]=e;return a.status===304},httpData:function(a,b,d){var e=a.getResponseHeader("content-type")||"",f=b==="xml"||!b&&e.indexOf("xml")>=0;a=f?a.responseXML:a.responseText;f&&a.documentElement.nodeName==="parsererror"&&c.error("parsererror");if(d&&d.dataFilter)a=d.dataFilter(a,b);if(typeof a==="string")if(b==="json"||!b&&e.indexOf("json")>=0)a=c.parseJSON(a);else if(b==="script"||!b&&e.indexOf("javascript")>=0)c.globalEval(a);return a}});if(E.ActiveXObject)c.ajaxSettings.xhr=
-function(){if(E.location.protocol!=="file:")try{return new E.XMLHttpRequest}catch(a){}try{return new E.ActiveXObject("Microsoft.XMLHTTP")}catch(b){}};c.support.ajax=!!c.ajaxSettings.xhr();var da={},tb=/^(?:toggle|show|hide)$/,ub=/^([+\-]=)?([\d+.\-]+)(.*)$/,aa,na=[["height","marginTop","marginBottom","paddingTop","paddingBottom"],["width","marginLeft","marginRight","paddingLeft","paddingRight"],["opacity"]];c.fn.extend({show:function(a,b,d){if(a||a===0)return this.animate(S("show",3),a,b,d);else{a=
-0;for(b=this.length;a<b;a++){if(!c.data(this[a],"olddisplay")&&this[a].style.display==="none")this[a].style.display="";this[a].style.display===""&&c.css(this[a],"display")==="none"&&c.data(this[a],"olddisplay",oa(this[a].nodeName))}for(a=0;a<b;a++)this[a].style.display=c.data(this[a],"olddisplay")||"";return this}},hide:function(a,b,d){if(a||a===0)return this.animate(S("hide",3),a,b,d);else{a=0;for(b=this.length;a<b;a++){d=c.css(this[a],"display");d!=="none"&&c.data(this[a],"olddisplay",d)}for(a=
-0;a<b;a++)this[a].style.display="none";return this}},_toggle:c.fn.toggle,toggle:function(a,b,d){var e=typeof a==="boolean";if(c.isFunction(a)&&c.isFunction(b))this._toggle.apply(this,arguments);else a==null||e?this.each(function(){var f=e?a:c(this).is(":hidden");c(this)[f?"show":"hide"]()}):this.animate(S("toggle",3),a,b,d);return this},fadeTo:function(a,b,d,e){return this.filter(":hidden").css("opacity",0).show().end().animate({opacity:b},a,d,e)},animate:function(a,b,d,e){var f=c.speed(b,d,e);if(c.isEmptyObject(a))return this.each(f.complete);
-return this[f.queue===false?"each":"queue"](function(){var h=c.extend({},f),k,l=this.nodeType===1,n=l&&c(this).is(":hidden"),s=this;for(k in a){var v=c.camelCase(k);if(k!==v){a[v]=a[k];delete a[k];k=v}if(a[k]==="hide"&&n||a[k]==="show"&&!n)return h.complete.call(this);if(l&&(k==="height"||k==="width")){h.overflow=[this.style.overflow,this.style.overflowX,this.style.overflowY];if(c.css(this,"display")==="inline"&&c.css(this,"float")==="none")if(c.support.inlineBlockNeedsLayout)if(oa(this.nodeName)===
-"inline")this.style.display="inline-block";else{this.style.display="inline";this.style.zoom=1}else this.style.display="inline-block"}if(c.isArray(a[k])){(h.specialEasing=h.specialEasing||{})[k]=a[k][1];a[k]=a[k][0]}}if(h.overflow!=null)this.style.overflow="hidden";h.curAnim=c.extend({},a);c.each(a,function(B,D){var H=new c.fx(s,h,B);if(tb.test(D))H[D==="toggle"?n?"show":"hide":D](a);else{var w=ub.exec(D),G=H.cur(true)||0;if(w){var M=parseFloat(w[2]),g=w[3]||"px";if(g!=="px"){c.style(s,B,(M||1)+g);
-G=(M||1)/H.cur(true)*G;c.style(s,B,G+g)}if(w[1])M=(w[1]==="-="?-1:1)*M+G;H.custom(G,M,g)}else H.custom(G,D,"")}});return true})},stop:function(a,b){var d=c.timers;a&&this.queue([]);this.each(function(){for(var e=d.length-1;e>=0;e--)if(d[e].elem===this){b&&d[e](true);d.splice(e,1)}});b||this.dequeue();return this}});c.each({slideDown:S("show",1),slideUp:S("hide",1),slideToggle:S("toggle",1),fadeIn:{opacity:"show"},fadeOut:{opacity:"hide"}},function(a,b){c.fn[a]=function(d,e,f){return this.animate(b,
-d,e,f)}});c.extend({speed:function(a,b,d){var e=a&&typeof a==="object"?c.extend({},a):{complete:d||!d&&b||c.isFunction(a)&&a,duration:a,easing:d&&b||b&&!c.isFunction(b)&&b};e.duration=c.fx.off?0:typeof e.duration==="number"?e.duration:e.duration in c.fx.speeds?c.fx.speeds[e.duration]:c.fx.speeds._default;e.old=e.complete;e.complete=function(){e.queue!==false&&c(this).dequeue();c.isFunction(e.old)&&e.old.call(this)};return e},easing:{linear:function(a,b,d,e){return d+e*a},swing:function(a,b,d,e){return(-Math.cos(a*
-Math.PI)/2+0.5)*e+d}},timers:[],fx:function(a,b,d){this.options=b;this.elem=a;this.prop=d;if(!b.orig)b.orig={}}});c.fx.prototype={update:function(){this.options.step&&this.options.step.call(this.elem,this.now,this);(c.fx.step[this.prop]||c.fx.step._default)(this)},cur:function(){if(this.elem[this.prop]!=null&&(!this.elem.style||this.elem.style[this.prop]==null))return this.elem[this.prop];var a=parseFloat(c.css(this.elem,this.prop));return a&&a>-1E4?a:0},custom:function(a,b,d){function e(h){return f.step(h)}
-this.startTime=c.now();this.start=a;this.end=b;this.unit=d||this.unit||"px";this.now=this.start;this.pos=this.state=0;var f=this;a=c.fx;e.elem=this.elem;if(e()&&c.timers.push(e)&&!aa)aa=setInterval(a.tick,a.interval)},show:function(){this.options.orig[this.prop]=c.style(this.elem,this.prop);this.options.show=true;this.custom(this.prop==="width"||this.prop==="height"?1:0,this.cur());c(this.elem).show()},hide:function(){this.options.orig[this.prop]=c.style(this.elem,this.prop);this.options.hide=true;
-this.custom(this.cur(),0)},step:function(a){var b=c.now(),d=true;if(a||b>=this.options.duration+this.startTime){this.now=this.end;this.pos=this.state=1;this.update();this.options.curAnim[this.prop]=true;for(var e in this.options.curAnim)if(this.options.curAnim[e]!==true)d=false;if(d){if(this.options.overflow!=null&&!c.support.shrinkWrapBlocks){var f=this.elem,h=this.options;c.each(["","X","Y"],function(l,n){f.style["overflow"+n]=h.overflow[l]})}this.options.hide&&c(this.elem).hide();if(this.options.hide||
-this.options.show)for(var k in this.options.curAnim)c.style(this.elem,k,this.options.orig[k]);this.options.complete.call(this.elem)}return false}else{a=b-this.startTime;this.state=a/this.options.duration;b=this.options.easing||(c.easing.swing?"swing":"linear");this.pos=c.easing[this.options.specialEasing&&this.options.specialEasing[this.prop]||b](this.state,a,0,1,this.options.duration);this.now=this.start+(this.end-this.start)*this.pos;this.update()}return true}};c.extend(c.fx,{tick:function(){for(var a=
-c.timers,b=0;b<a.length;b++)a[b]()||a.splice(b--,1);a.length||c.fx.stop()},interval:13,stop:function(){clearInterval(aa);aa=null},speeds:{slow:600,fast:200,_default:400},step:{opacity:function(a){c.style(a.elem,"opacity",a.now)},_default:function(a){if(a.elem.style&&a.elem.style[a.prop]!=null)a.elem.style[a.prop]=(a.prop==="width"||a.prop==="height"?Math.max(0,a.now):a.now)+a.unit;else a.elem[a.prop]=a.now}}});if(c.expr&&c.expr.filters)c.expr.filters.animated=function(a){return c.grep(c.timers,function(b){return a===
-b.elem}).length};var vb=/^t(?:able|d|h)$/i,Fa=/^(?:body|html)$/i;c.fn.offset="getBoundingClientRect"in u.documentElement?function(a){var b=this[0],d;if(a)return this.each(function(k){c.offset.setOffset(this,a,k)});if(!b||!b.ownerDocument)return null;if(b===b.ownerDocument.body)return c.offset.bodyOffset(b);try{d=b.getBoundingClientRect()}catch(e){}var f=b.ownerDocument,h=f.documentElement;if(!d||!c.contains(h,b))return d||{top:0,left:0};b=f.body;f=ea(f);return{top:d.top+(f.pageYOffset||c.support.boxModel&&
-h.scrollTop||b.scrollTop)-(h.clientTop||b.clientTop||0),left:d.left+(f.pageXOffset||c.support.boxModel&&h.scrollLeft||b.scrollLeft)-(h.clientLeft||b.clientLeft||0)}}:function(a){var b=this[0];if(a)return this.each(function(s){c.offset.setOffset(this,a,s)});if(!b||!b.ownerDocument)return null;if(b===b.ownerDocument.body)return c.offset.bodyOffset(b);c.offset.initialize();var d=b.offsetParent,e=b.ownerDocument,f,h=e.documentElement,k=e.body;f=(e=e.defaultView)?e.getComputedStyle(b,null):b.currentStyle;
-for(var l=b.offsetTop,n=b.offsetLeft;(b=b.parentNode)&&b!==k&&b!==h;){if(c.offset.supportsFixedPosition&&f.position==="fixed")break;f=e?e.getComputedStyle(b,null):b.currentStyle;l-=b.scrollTop;n-=b.scrollLeft;if(b===d){l+=b.offsetTop;n+=b.offsetLeft;if(c.offset.doesNotAddBorder&&!(c.offset.doesAddBorderForTableAndCells&&vb.test(b.nodeName))){l+=parseFloat(f.borderTopWidth)||0;n+=parseFloat(f.borderLeftWidth)||0}d=b.offsetParent}if(c.offset.subtractsBorderForOverflowNotVisible&&f.overflow!=="visible"){l+=
-parseFloat(f.borderTopWidth)||0;n+=parseFloat(f.borderLeftWidth)||0}f=f}if(f.position==="relative"||f.position==="static"){l+=k.offsetTop;n+=k.offsetLeft}if(c.offset.supportsFixedPosition&&f.position==="fixed"){l+=Math.max(h.scrollTop,k.scrollTop);n+=Math.max(h.scrollLeft,k.scrollLeft)}return{top:l,left:n}};c.offset={initialize:function(){var a=u.body,b=u.createElement("div"),d,e,f,h=parseFloat(c.css(a,"marginTop"))||0;c.extend(b.style,{position:"absolute",top:0,left:0,margin:0,border:0,width:"1px",
-height:"1px",visibility:"hidden"});b.innerHTML="<div style='position:absolute;top:0;left:0;margin:0;border:5px solid #000;padding:0;width:1px;height:1px;'><div></div></div><table style='position:absolute;top:0;left:0;margin:0;border:5px solid #000;padding:0;width:1px;height:1px;' cellpadding='0' cellspacing='0'><tr><td></td></tr></table>";a.insertBefore(b,a.firstChild);d=b.firstChild;e=d.firstChild;f=d.nextSibling.firstChild.firstChild;this.doesNotAddBorder=e.offsetTop!==5;this.doesAddBorderForTableAndCells=
-f.offsetTop===5;e.style.position="fixed";e.style.top="20px";this.supportsFixedPosition=e.offsetTop===20||e.offsetTop===15;e.style.position=e.style.top="";d.style.overflow="hidden";d.style.position="relative";this.subtractsBorderForOverflowNotVisible=e.offsetTop===-5;this.doesNotIncludeMarginInBodyOffset=a.offsetTop!==h;a.removeChild(b);c.offset.initialize=c.noop},bodyOffset:function(a){var b=a.offsetTop,d=a.offsetLeft;c.offset.initialize();if(c.offset.doesNotIncludeMarginInBodyOffset){b+=parseFloat(c.css(a,
-"marginTop"))||0;d+=parseFloat(c.css(a,"marginLeft"))||0}return{top:b,left:d}},setOffset:function(a,b,d){var e=c.css(a,"position");if(e==="static")a.style.position="relative";var f=c(a),h=f.offset(),k=c.css(a,"top"),l=c.css(a,"left"),n=e==="absolute"&&c.inArray("auto",[k,l])>-1;e={};var s={};if(n)s=f.position();k=n?s.top:parseInt(k,10)||0;l=n?s.left:parseInt(l,10)||0;if(c.isFunction(b))b=b.call(a,d,h);if(b.top!=null)e.top=b.top-h.top+k;if(b.left!=null)e.left=b.left-h.left+l;"using"in b?b.using.call(a,
-e):f.css(e)}};c.fn.extend({position:function(){if(!this[0])return null;var a=this[0],b=this.offsetParent(),d=this.offset(),e=Fa.test(b[0].nodeName)?{top:0,left:0}:b.offset();d.top-=parseFloat(c.css(a,"marginTop"))||0;d.left-=parseFloat(c.css(a,"marginLeft"))||0;e.top+=parseFloat(c.css(b[0],"borderTopWidth"))||0;e.left+=parseFloat(c.css(b[0],"borderLeftWidth"))||0;return{top:d.top-e.top,left:d.left-e.left}},offsetParent:function(){return this.map(function(){for(var a=this.offsetParent||u.body;a&&!Fa.test(a.nodeName)&&
-c.css(a,"position")==="static";)a=a.offsetParent;return a})}});c.each(["Left","Top"],function(a,b){var d="scroll"+b;c.fn[d]=function(e){var f=this[0],h;if(!f)return null;if(e!==A)return this.each(function(){if(h=ea(this))h.scrollTo(!a?e:c(h).scrollLeft(),a?e:c(h).scrollTop());else this[d]=e});else return(h=ea(f))?"pageXOffset"in h?h[a?"pageYOffset":"pageXOffset"]:c.support.boxModel&&h.document.documentElement[d]||h.document.body[d]:f[d]}});c.each(["Height","Width"],function(a,b){var d=b.toLowerCase();
-c.fn["inner"+b]=function(){return this[0]?parseFloat(c.css(this[0],d,"padding")):null};c.fn["outer"+b]=function(e){return this[0]?parseFloat(c.css(this[0],d,e?"margin":"border")):null};c.fn[d]=function(e){var f=this[0];if(!f)return e==null?null:this;if(c.isFunction(e))return this.each(function(h){var k=c(this);k[d](e.call(this,h,k[d]()))});return c.isWindow(f)?f.document.compatMode==="CSS1Compat"&&f.document.documentElement["client"+b]||f.document.body["client"+b]:f.nodeType===9?Math.max(f.documentElement["client"+
-b],f.body["scroll"+b],f.documentElement["scroll"+b],f.body["offset"+b],f.documentElement["offset"+b]):e===A?parseFloat(c.css(f,d)):this.css(d,typeof e==="string"?e:e+"px")}})})(window);
--- a/DebugClients/Python/coverage/htmlfiles/jquery.tablesorter.min.js	Sat Oct 10 12:06:10 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,2 +0,0 @@
-
-(function($){$.extend({tablesorter:new function(){var parsers=[],widgets=[];this.defaults={cssHeader:"header",cssAsc:"headerSortUp",cssDesc:"headerSortDown",sortInitialOrder:"asc",sortMultiSortKey:"shiftKey",sortForce:null,sortAppend:null,textExtraction:"simple",parsers:{},widgets:[],widgetZebra:{css:["even","odd"]},headers:{},widthFixed:false,cancelSelection:true,sortList:[],headerList:[],dateFormat:"us",decimal:'.',debug:false};function benchmark(s,d){log(s+","+(new Date().getTime()-d.getTime())+"ms");}this.benchmark=benchmark;function log(s){if(typeof console!="undefined"&&typeof console.debug!="undefined"){console.log(s);}else{alert(s);}}function buildParserCache(table,$headers){if(table.config.debug){var parsersDebug="";}var rows=table.tBodies[0].rows;if(table.tBodies[0].rows[0]){var list=[],cells=rows[0].cells,l=cells.length;for(var i=0;i<l;i++){var p=false;if($.metadata&&($($headers[i]).metadata()&&$($headers[i]).metadata().sorter)){p=getParserById($($headers[i]).metadata().sorter);}else if((table.config.headers[i]&&table.config.headers[i].sorter)){p=getParserById(table.config.headers[i].sorter);}if(!p){p=detectParserForColumn(table,cells[i]);}if(table.config.debug){parsersDebug+="column:"+i+" parser:"+p.id+"\n";}list.push(p);}}if(table.config.debug){log(parsersDebug);}return list;};function detectParserForColumn(table,node){var l=parsers.length;for(var i=1;i<l;i++){if(parsers[i].is($.trim(getElementText(table.config,node)),table,node)){return parsers[i];}}return parsers[0];}function getParserById(name){var l=parsers.length;for(var i=0;i<l;i++){if(parsers[i].id.toLowerCase()==name.toLowerCase()){return parsers[i];}}return false;}function buildCache(table){if(table.config.debug){var cacheTime=new Date();}var totalRows=(table.tBodies[0]&&table.tBodies[0].rows.length)||0,totalCells=(table.tBodies[0].rows[0]&&table.tBodies[0].rows[0].cells.length)||0,parsers=table.config.parsers,cache={row:[],normalized:[]};for(var i=0;i<totalRows;++i){var c=table.tBodies[0].rows[i],cols=[];cache.row.push($(c));for(var j=0;j<totalCells;++j){cols.push(parsers[j].format(getElementText(table.config,c.cells[j]),table,c.cells[j]));}cols.push(i);cache.normalized.push(cols);cols=null;};if(table.config.debug){benchmark("Building cache for "+totalRows+" rows:",cacheTime);}return cache;};function getElementText(config,node){if(!node)return"";var t="";if(config.textExtraction=="simple"){if(node.childNodes[0]&&node.childNodes[0].hasChildNodes()){t=node.childNodes[0].innerHTML;}else{t=node.innerHTML;}}else{if(typeof(config.textExtraction)=="function"){t=config.textExtraction(node);}else{t=$(node).text();}}return t;}function appendToTable(table,cache){if(table.config.debug){var appendTime=new Date()}var c=cache,r=c.row,n=c.normalized,totalRows=n.length,checkCell=(n[0].length-1),tableBody=$(table.tBodies[0]),rows=[];for(var i=0;i<totalRows;i++){rows.push(r[n[i][checkCell]]);if(!table.config.appender){var o=r[n[i][checkCell]];var l=o.length;for(var j=0;j<l;j++){tableBody[0].appendChild(o[j]);}}}if(table.config.appender){table.config.appender(table,rows);}rows=null;if(table.config.debug){benchmark("Rebuilt table:",appendTime);}applyWidget(table);setTimeout(function(){$(table).trigger("sortEnd");},0);};function buildHeaders(table){if(table.config.debug){var time=new Date();}var meta=($.metadata)?true:false,tableHeadersRows=[];for(var i=0;i<table.tHead.rows.length;i++){tableHeadersRows[i]=0;};$tableHeaders=$("thead 
th",table);$tableHeaders.each(function(index){this.count=0;this.column=index;this.order=formatSortingOrder(table.config.sortInitialOrder);if(checkHeaderMetadata(this)||checkHeaderOptions(table,index))this.sortDisabled=true;if(!this.sortDisabled){$(this).addClass(table.config.cssHeader);}table.config.headerList[index]=this;});if(table.config.debug){benchmark("Built headers:",time);log($tableHeaders);}return $tableHeaders;};function checkCellColSpan(table,rows,row){var arr=[],r=table.tHead.rows,c=r[row].cells;for(var i=0;i<c.length;i++){var cell=c[i];if(cell.colSpan>1){arr=arr.concat(checkCellColSpan(table,headerArr,row++));}else{if(table.tHead.length==1||(cell.rowSpan>1||!r[row+1])){arr.push(cell);}}}return arr;};function checkHeaderMetadata(cell){if(($.metadata)&&($(cell).metadata().sorter===false)){return true;};return false;}function checkHeaderOptions(table,i){if((table.config.headers[i])&&(table.config.headers[i].sorter===false)){return true;};return false;}function applyWidget(table){var c=table.config.widgets;var l=c.length;for(var i=0;i<l;i++){getWidgetById(c[i]).format(table);}}function getWidgetById(name){var l=widgets.length;for(var i=0;i<l;i++){if(widgets[i].id.toLowerCase()==name.toLowerCase()){return widgets[i];}}};function formatSortingOrder(v){if(typeof(v)!="Number"){i=(v.toLowerCase()=="desc")?1:0;}else{i=(v==(0||1))?v:0;}return i;}function isValueInArray(v,a){var l=a.length;for(var i=0;i<l;i++){if(a[i][0]==v){return true;}}return false;}function setHeadersCss(table,$headers,list,css){$headers.removeClass(css[0]).removeClass(css[1]);var h=[];$headers.each(function(offset){if(!this.sortDisabled){h[this.column]=$(this);}});var l=list.length;for(var i=0;i<l;i++){h[list[i][0]].addClass(css[list[i][1]]);}}function fixColumnWidth(table,$headers){var c=table.config;if(c.widthFixed){var colgroup=$('<colgroup>');$("tr:first td",table.tBodies[0]).each(function(){colgroup.append($('<col>').css('width',$(this).width()));});$(table).prepend(colgroup);};}function updateHeaderSortCount(table,sortList){var c=table.config,l=sortList.length;for(var i=0;i<l;i++){var s=sortList[i],o=c.headerList[s[0]];o.count=s[1];o.count++;}}function multisort(table,sortList,cache){if(table.config.debug){var sortTime=new Date();}var dynamicExp="var sortWrapper = function(a,b) {",l=sortList.length;for(var i=0;i<l;i++){var c=sortList[i][0];var order=sortList[i][1];var s=(getCachedSortType(table.config.parsers,c)=="text")?((order==0)?"sortText":"sortTextDesc"):((order==0)?"sortNumeric":"sortNumericDesc");var e="e"+i;dynamicExp+="var "+e+" = "+s+"(a["+c+"],b["+c+"]); ";dynamicExp+="if("+e+") { return "+e+"; } ";dynamicExp+="else { ";}var orgOrderCol=cache.normalized[0].length-1;dynamicExp+="return a["+orgOrderCol+"]-b["+orgOrderCol+"];";for(var i=0;i<l;i++){dynamicExp+="}; ";}dynamicExp+="return 0; ";dynamicExp+="}; ";eval(dynamicExp);cache.normalized.sort(sortWrapper);if(table.config.debug){benchmark("Sorting on "+sortList.toString()+" and dir "+order+" time:",sortTime);}return cache;};function sortText(a,b){return((a<b)?-1:((a>b)?1:0));};function sortTextDesc(a,b){return((b<a)?-1:((b>a)?1:0));};function sortNumeric(a,b){return a-b;};function sortNumericDesc(a,b){return b-a;};function getCachedSortType(parsers,i){return parsers[i].type;};this.construct=function(settings){return this.each(function(){if(!this.tHead||!this.tBodies)return;var 
$this,$document,$headers,cache,config,shiftDown=0,sortOrder;this.config={};config=$.extend(this.config,$.tablesorter.defaults,settings);$this=$(this);$headers=buildHeaders(this);this.config.parsers=buildParserCache(this,$headers);cache=buildCache(this);var sortCSS=[config.cssDesc,config.cssAsc];fixColumnWidth(this);$headers.click(function(e){$this.trigger("sortStart");var totalRows=($this[0].tBodies[0]&&$this[0].tBodies[0].rows.length)||0;if(!this.sortDisabled&&totalRows>0){var $cell=$(this);var i=this.column;this.order=this.count++%2;if(!e[config.sortMultiSortKey]){config.sortList=[];if(config.sortForce!=null){var a=config.sortForce;for(var j=0;j<a.length;j++){if(a[j][0]!=i){config.sortList.push(a[j]);}}}config.sortList.push([i,this.order]);}else{if(isValueInArray(i,config.sortList)){for(var j=0;j<config.sortList.length;j++){var s=config.sortList[j],o=config.headerList[s[0]];if(s[0]==i){o.count=s[1];o.count++;s[1]=o.count%2;}}}else{config.sortList.push([i,this.order]);}};setTimeout(function(){setHeadersCss($this[0],$headers,config.sortList,sortCSS);appendToTable($this[0],multisort($this[0],config.sortList,cache));},1);return false;}}).mousedown(function(){if(config.cancelSelection){this.onselectstart=function(){return false};return false;}});$this.bind("update",function(){this.config.parsers=buildParserCache(this,$headers);cache=buildCache(this);}).bind("sorton",function(e,list){$(this).trigger("sortStart");config.sortList=list;var sortList=config.sortList;updateHeaderSortCount(this,sortList);setHeadersCss(this,$headers,sortList,sortCSS);appendToTable(this,multisort(this,sortList,cache));}).bind("appendCache",function(){appendToTable(this,cache);}).bind("applyWidgetId",function(e,id){getWidgetById(id).format(this);}).bind("applyWidgets",function(){applyWidget(this);});if($.metadata&&($(this).metadata()&&$(this).metadata().sortlist)){config.sortList=$(this).metadata().sortlist;}if(config.sortList.length>0){$this.trigger("sorton",[config.sortList]);}applyWidget(this);});};this.addParser=function(parser){var l=parsers.length,a=true;for(var i=0;i<l;i++){if(parsers[i].id.toLowerCase()==parser.id.toLowerCase()){a=false;}}if(a){parsers.push(parser);};};this.addWidget=function(widget){widgets.push(widget);};this.formatFloat=function(s){var i=parseFloat(s);return(isNaN(i))?0:i;};this.formatInt=function(s){var i=parseInt(s);return(isNaN(i))?0:i;};this.isDigit=function(s,config){var DECIMAL='\\'+config.decimal;var exp='/(^[+]?0('+DECIMAL+'0+)?$)|(^([-+]?[1-9][0-9]*)$)|(^([-+]?((0?|[1-9][0-9]*)'+DECIMAL+'(0*[1-9][0-9]*)))$)|(^[-+]?[1-9]+[0-9]*'+DECIMAL+'0+$)/';return RegExp(exp).test($.trim(s));};this.clearTableBody=function(table){if($.browser.msie){function empty(){while(this.firstChild)this.removeChild(this.firstChild);}empty.apply(table.tBodies[0]);}else{table.tBodies[0].innerHTML="";}};}});$.fn.extend({tablesorter:$.tablesorter.construct});var ts=$.tablesorter;ts.addParser({id:"text",is:function(s){return true;},format:function(s){return $.trim(s.toLowerCase());},type:"text"});ts.addParser({id:"digit",is:function(s,table){var c=table.config;return $.tablesorter.isDigit(s,c);},format:function(s){return $.tablesorter.formatFloat(s);},type:"numeric"});ts.addParser({id:"currency",is:function(s){return/^[£$€?.]/.test(s);},format:function(s){return $.tablesorter.formatFloat(s.replace(new RegExp(/[^0-9.]/g),""));},type:"numeric"});ts.addParser({id:"ipAddress",is:function(s){return/^\d{2,3}[\.]\d{2,3}[\.]\d{2,3}[\.]\d{2,3}$/.test(s);},format:function(s){var a=s.split("."),r="",l=a.length;for(var 
i=0;i<l;i++){var item=a[i];if(item.length==2){r+="0"+item;}else{r+=item;}}return $.tablesorter.formatFloat(r);},type:"numeric"});ts.addParser({id:"url",is:function(s){return/^(https?|ftp|file):\/\/$/.test(s);},format:function(s){return jQuery.trim(s.replace(new RegExp(/(https?|ftp|file):\/\//),''));},type:"text"});ts.addParser({id:"isoDate",is:function(s){return/^\d{4}[\/-]\d{1,2}[\/-]\d{1,2}$/.test(s);},format:function(s){return $.tablesorter.formatFloat((s!="")?new Date(s.replace(new RegExp(/-/g),"/")).getTime():"0");},type:"numeric"});ts.addParser({id:"percent",is:function(s){return/\%$/.test($.trim(s));},format:function(s){return $.tablesorter.formatFloat(s.replace(new RegExp(/%/g),""));},type:"numeric"});ts.addParser({id:"usLongDate",is:function(s){return s.match(new RegExp(/^[A-Za-z]{3,10}\.? [0-9]{1,2}, ([0-9]{4}|'?[0-9]{2}) (([0-2]?[0-9]:[0-5][0-9])|([0-1]?[0-9]:[0-5][0-9]\s(AM|PM)))$/));},format:function(s){return $.tablesorter.formatFloat(new Date(s).getTime());},type:"numeric"});ts.addParser({id:"shortDate",is:function(s){return/\d{1,2}[\/\-]\d{1,2}[\/\-]\d{2,4}/.test(s);},format:function(s,table){var c=table.config;s=s.replace(/\-/g,"/");if(c.dateFormat=="us"){s=s.replace(/(\d{1,2})[\/\-](\d{1,2})[\/\-](\d{4})/,"$3/$1/$2");}else if(c.dateFormat=="uk"){s=s.replace(/(\d{1,2})[\/\-](\d{1,2})[\/\-](\d{4})/,"$3/$2/$1");}else if(c.dateFormat=="dd/mm/yy"||c.dateFormat=="dd-mm-yy"){s=s.replace(/(\d{1,2})[\/\-](\d{1,2})[\/\-](\d{2})/,"$1/$2/$3");}return $.tablesorter.formatFloat(new Date(s).getTime());},type:"numeric"});ts.addParser({id:"time",is:function(s){return/^(([0-2]?[0-9]:[0-5][0-9])|([0-1]?[0-9]:[0-5][0-9]\s(am|pm)))$/.test(s);},format:function(s){return $.tablesorter.formatFloat(new Date("2000/01/01 "+s).getTime());},type:"numeric"});ts.addParser({id:"metadata",is:function(s){return false;},format:function(s,table,cell){var c=table.config,p=(!c.parserMetadataName)?'sortValue':c.parserMetadataName;return $(cell).metadata()[p];},type:"numeric"});ts.addWidget({id:"zebra",format:function(table){if(table.config.debug){var time=new Date();}$("tr:visible",table.tBodies[0]).filter(':even').removeClass(table.config.widgetZebra.css[1]).addClass(table.config.widgetZebra.css[0]).end().filter(':odd').removeClass(table.config.widgetZebra.css[0]).addClass(table.config.widgetZebra.css[1]);if(table.config.debug){$.tablesorter.benchmark("Applying Zebra widget",time);}}});})(jQuery);
\ No newline at end of file
Binary file DebugClients/Python/coverage/htmlfiles/keybd_closed.png has changed
Binary file DebugClients/Python/coverage/htmlfiles/keybd_open.png has changed
--- a/DebugClients/Python/coverage/htmlfiles/pyfile.html	Sat Oct 10 12:06:10 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,90 +0,0 @@
-<!doctype html PUBLIC "-//W3C//DTD html 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
-<html>
-<head>
-    <meta http-equiv='Content-Type' content='text/html; charset=utf-8'>
-    {# IE8 rounds line-height incorrectly, and adding this emulateIE7 line makes it right! #}
-    {# http://social.msdn.microsoft.com/Forums/en-US/iewebdevelopment/thread/7684445e-f080-4d8f-8529-132763348e21 #}
-    <meta http-equiv='X-UA-Compatible' content='IE=emulateIE7' />
-    <title>Coverage for {{cu.name|escape}}: {{nums.pc_covered_str}}%</title>
-    <link rel='stylesheet' href='style.css' type='text/css'>
-    {% if extra_css %}
-        <link rel='stylesheet' href='{{ extra_css }}' type='text/css'>
-    {% endif %}
-    <script type='text/javascript' src='jquery.min.js'></script>
-    <script type='text/javascript' src='jquery.hotkeys.js'></script>
-    <script type='text/javascript' src='jquery.isonscreen.js'></script>
-    <script type='text/javascript' src='coverage_html.js'></script>
-    <script type='text/javascript' charset='utf-8'>
-        jQuery(document).ready(coverage.pyfile_ready);
-    </script>
-</head>
-<body id='pyfile'>
-
-<div id='header'>
-    <div class='content'>
-        <h1>Coverage for <b>{{cu.name|escape}}</b> :
-            <span class='pc_cov'>{{nums.pc_covered_str}}%</span>
-        </h1>
-        <img id='keyboard_icon' src='keybd_closed.png'>
-        <h2 class='stats'>
-            {{nums.n_statements}} statements &nbsp;
-            <span class='{{c_run}} shortkey_r button_toggle_run'>{{nums.n_executed}} run</span>
-            <span class='{{c_mis}} shortkey_m button_toggle_mis'>{{nums.n_missing}} missing</span>
-            <span class='{{c_exc}} shortkey_x button_toggle_exc'>{{nums.n_excluded}} excluded</span>
-            {% if arcs %}
-                <span class='{{c_par}} shortkey_p button_toggle_par'>{{nums.n_partial_branches}} partial</span>
-            {% endif %}
-        </h2>
-    </div>
-</div>
-
-<div class='help_panel'>
-    <img id='panel_icon' src='keybd_open.png'>
-    <p class='legend'>Hot-keys on this page</p>
-    <div>
-    <p class='keyhelp'>
-        <span class='key'>r</span>
-        <span class='key'>m</span>
-        <span class='key'>x</span>
-        <span class='key'>p</span> &nbsp; toggle line displays
-    </p>
-    <p class='keyhelp'>
-        <span class='key'>j</span>
-        <span class='key'>k</span> &nbsp; next/prev highlighted chunk
-    </p>
-    <p class='keyhelp'>
-        <span class='key'>0</span> &nbsp; (zero) top of page
-    </p>
-    <p class='keyhelp'>
-        <span class='key'>1</span> &nbsp; (one) first highlighted chunk
-    </p>
-    </div>
-</div>
-
-<div id='source'>
-    <table cellspacing='0' cellpadding='0'>
-        <tr>
-            <td class='linenos' valign='top'>
-                {% for line in lines %}
-                    <p id='n{{line.number}}' class='{{line.class}}'><a href='#n{{line.number}}'>{{line.number}}</a></p>
-                {% endfor %}
-            </td>
-            <td class='text' valign='top'>
-                {% for line in lines %}
-                    <p id='t{{line.number}}' class='{{line.class}}'>{% if line.annotate %}<span class='annotate' title='{{line.annotate_title}}'>{{line.annotate}}</span>{% endif %}{{line.html}}<span class='strut'>&nbsp;</span></p>
-                {% endfor %}
-            </td>
-        </tr>
-    </table>
-</div>
-
-<div id='footer'>
-    <div class='content'>
-        <p>
-            <a class='nav' href='index.html'>&#xab; index</a> &nbsp; &nbsp; <a class='nav' href='{{__url__}}'>coverage.py v{{__version__}}</a>
-        </p>
-    </div>
-</div>
-
-</body>
-</html>
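
For orientation, the deleted pyfile.html template above is filled from a rendering context built by the HTML reporter; the placeholder names it reads ({{cu.name}}, {{nums.pc_covered_str}}, {{line.number}}, {{line.class}}, {{line.html}}, ...) imply roughly the following shape. This is an illustrative sketch only: the real reporter passes objects with these attributes, and every value below is invented for the example.

# Illustrative only: keys mirror the placeholders used in the deleted template above.
# The real context carries reporter objects, not plain dicts; this just shows the shape.
context = {
    "cu": {"name": "coverage/misc.py"},      # code unit being reported (hypothetical value)
    "nums": {                                # aggregate counts for the file
        "pc_covered_str": "87",
        "n_statements": 120,
        "n_executed": 104,
        "n_missing": 16,
        "n_excluded": 0,
        "n_partial_branches": 3,
    },
    "arcs": True,                            # whether the "partial" span is rendered
    "lines": [                               # one entry per source line
        {
            "number": 1,
            "class": "run",                  # CSS classes consumed by style.css below
            "html": "<span class='key'>import</span> os",
            "annotate": None,
            "annotate_title": None,
        },
    ],
    "extra_css": None,
}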
--- a/DebugClients/Python/coverage/htmlfiles/style.css	Sat Oct 10 12:06:10 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,300 +0,0 @@
-/* CSS styles for Coverage. */
-/* Page-wide styles */
-html, body, h1, h2, h3, p, td, th {
-    margin: 0;
-    padding: 0;
-    border: 0;
-    outline: 0;
-    font-weight: inherit;
-    font-style: inherit;
-    font-size: 100%;
-    font-family: inherit;
-    vertical-align: baseline;
-    }
-
-/* Set baseline grid to 16 pt. */
-body {
-    font-family: georgia, serif;
-    font-size: 1em;
-    }
-
-html>body {
-    font-size: 16px;
-    }
-
-/* Set base font size to 12/16 */
-p {
-    font-size: .75em;           /* 12/16 */
-    line-height: 1.33333333em;  /* 16/12 */
-    }
-
-table {
-    border-collapse: collapse;
-    }
-
-a.nav {
-    text-decoration: none;
-    color: inherit;
-    }
-a.nav:hover {
-    text-decoration: underline;
-    color: inherit;
-    }
-
-/* Page structure */
-#header {
-    background: #f8f8f8;
-    width: 100%;
-    border-bottom: 1px solid #eee;
-    }
-
-#source {
-    padding: 1em;
-    font-family: "courier new", monospace;
-    }
-
-#indexfile #footer {
-    margin: 1em 3em;
-    }
-
-#pyfile #footer {
-    margin: 1em 1em;
-    }
-
-#footer .content {
-    padding: 0;
-    font-size: 85%;
-    font-family: verdana, sans-serif;
-    color: #666666;
-    font-style: italic;
-    }
-
-#index {
-    margin: 1em 0 0 3em;
-    }
-
-/* Header styles */
-#header .content {
-    padding: 1em 3em;
-    }
-
-h1 {
-    font-size: 1.25em;
-}
-
-h2.stats {
-    margin-top: .5em;
-    font-size: 1em;
-}
-.stats span {
-    border: 1px solid;
-    padding: .1em .25em;
-    margin: 0 .1em;
-    cursor: pointer;
-    border-color: #999 #ccc #ccc #999;
-}
-.stats span.hide_run, .stats span.hide_exc,
-.stats span.hide_mis, .stats span.hide_par,
-.stats span.par.hide_run.hide_par {
-    border-color: #ccc #999 #999 #ccc;
-}
-.stats span.par.hide_run {
-    border-color: #999 #ccc #ccc #999;
-}
-
-.stats span.run {
-    background: #ddffdd;
-}
-.stats span.exc {
-    background: #eeeeee;
-}
-.stats span.mis {
-    background: #ffdddd;
-}
-.stats span.hide_run {
-    background: #eeffee;
-}
-.stats span.hide_exc {
-    background: #f5f5f5;
-}
-.stats span.hide_mis {
-    background: #ffeeee;
-}
-.stats span.par {
-    background: #ffffaa;
-}
-.stats span.hide_par {
-    background: #ffffcc;
-}
-
-/* Help panel */
-#keyboard_icon {
-    float: right;
-    cursor: pointer;
-}
-
-.help_panel {
-    position: absolute;
-    background: #ffc;
-    padding: .5em;
-    border: 1px solid #883;
-    display: none;
-}
-
-#indexfile .help_panel {
-    width: 20em; height: 4em;
-}
-
-#pyfile .help_panel {
-    width: 16em; height: 8em;
-}
-
-.help_panel .legend {
-    font-style: italic;
-    margin-bottom: 1em;
-}
-
-#panel_icon {
-    float: right;
-    cursor: pointer;
-}
-
-.keyhelp {
-    margin: .75em;
-}
-
-.keyhelp .key {
-    border: 1px solid black;
-    border-color: #888 #333 #333 #888;
-    padding: .1em .35em;
-    font-family: monospace;
-    font-weight: bold;
-    background: #eee;
-}
-
-/* Source file styles */
-.linenos p {
-    text-align: right;
-    margin: 0;
-    padding: 0 .5em;
-    color: #999999;
-    font-family: verdana, sans-serif;
-    font-size: .625em;   /* 10/16 */
-    line-height: 1.6em;  /* 16/10 */
-    }
-.linenos p.highlight {
-    background: #ffdd00;
-    }
-.linenos p a {
-    text-decoration: none;
-    color: #999999;
-    }
-.linenos p a:hover {
-    text-decoration: underline;
-    color: #999999;
-    }
-
-td.text {
-    width: 100%;
-    }
-.text p {
-    margin: 0;
-    padding: 0 0 0 .5em;
-    border-left: 2px solid #ffffff;
-    white-space: nowrap;
-    }
-
-.text p.mis {
-    background: #ffdddd;
-    border-left: 2px solid #ff0000;
-    }
-.text p.run, .text p.run.hide_par {
-    background: #ddffdd;
-    border-left: 2px solid #00ff00;
-    }
-.text p.exc {
-    background: #eeeeee;
-    border-left: 2px solid #808080;
-    }
-.text p.par, .text p.par.hide_run {
-    background: #ffffaa;
-    border-left: 2px solid #eeee99;
-    }
-.text p.hide_run, .text p.hide_exc, .text p.hide_mis, .text p.hide_par,
-.text p.hide_run.hide_par {
-    background: inherit;
-    }
-
-.text span.annotate {
-    font-family: georgia;
-    font-style: italic;
-    color: #666;
-    float: right;
-    padding-right: .5em;
-    }
-.text p.hide_par span.annotate {
-    display: none;
-    }
-
-/* Syntax coloring */
-.text .com {
-    color: green;
-    font-style: italic;
-    line-height: 1px;
-    }
-.text .key {
-    font-weight: bold;
-    line-height: 1px;
-    }
-.text .str {
-    color: #000080;
-    }
-
-/* index styles */
-#index td, #index th {
-    text-align: right;
-    width: 5em;
-    padding: .25em .5em;
-    border-bottom: 1px solid #eee;
-    }
-#index th {
-    font-style: italic;
-    color: #333;
-    border-bottom: 1px solid #ccc;
-    cursor: pointer;
-    }
-#index th:hover {
-    background: #eee;
-    border-bottom: 1px solid #999;
-    }
-#index td.left, #index th.left {
-    padding-left: 0;
-    }
-#index td.right, #index th.right {
-    padding-right: 0;
-    }
-#index th.headerSortDown, #index th.headerSortUp {
-    border-bottom: 1px solid #000;
-    }
-#index td.name, #index th.name {
-    text-align: left;
-    width: auto;
-    }
-#index td.name a {
-    text-decoration: none;
-    color: #000;
-    }
-#index td.name a:hover {
-    text-decoration: underline;
-    color: #000;
-    }
-#index tr.total {
-    }
-#index tr.total td {
-    font-weight: bold;
-    border-top: 1px solid #ccc;
-    border-bottom: none;
-    }
-#index tr.file:hover {
-    background: #eeeeee;
-    }
--- a/DebugClients/Python/coverage/misc.py	Sat Oct 10 12:06:10 2015 +0200
+++ b/DebugClients/Python/coverage/misc.py	Sat Oct 10 12:44:52 2015 +0200
@@ -1,12 +1,38 @@
-"""Miscellaneous stuff for Coverage."""
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
+"""Miscellaneous stuff for coverage.py."""
 
 import errno
+import hashlib
 import inspect
 import os
-import sys
+
+from coverage import env
+from coverage.backward import string_class, to_bytes, unicode_class
+
+
+# Use PyContracts for assertion testing on parameters and returns, but only if
+# we are running our own test suite.
+if env.TESTING:
+    from contracts import contract              # pylint: disable=unused-import
+    from contracts import new_contract
 
-from .backward import md5, sorted       # pylint: disable=W0622
-from .backward import string_class, to_bytes
+    try:
+        # Define contract words that PyContract doesn't have.
+        new_contract('bytes', lambda v: isinstance(v, bytes))
+        if env.PY3:
+            new_contract('unicode', lambda v: isinstance(v, unicode_class))
+    except ValueError:
+        # During meta-coverage, this module is imported twice, and PyContracts
+        # doesn't like redefining contracts. It's OK.
+        pass
+else:                                           # pragma: not covered
+    # We aren't using real PyContracts, so just define a no-op decorator as a
+    # stunt double.
+    def contract(**unused):
+        """Dummy no-op implementation of `contract`."""
+        return lambda func: func
 
 
 def nice_pair(pair):
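
The hunk above replaces the old backward-compatibility imports with a PyContracts setup that is only active under env.TESTING; otherwise a no-op stunt double stands in for the decorator. As a rough illustration (the function below is invented, not part of the changeset), a use site looks the same in both modes; only the checking differs:

from coverage.misc import contract

@contract(text='unicode', returns='bytes')
def encode_text(text):
    """Hypothetical helper, not from the changeset.  Under env.TESTING,
    PyContracts validates the parameter and return types ('bytes' and, on
    Python 3, 'unicode' are the contract words registered above); otherwise
    the stunt-double decorator returns the function unchanged."""
    return text.encode('utf-8')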
@@ -42,7 +68,7 @@
     lines = sorted(lines)
     while i < len(statements) and j < len(lines):
         if statements[i] == lines[j]:
-            if start == None:
+            if start is None:
                 start = lines[j]
             end = lines[j]
             j += 1
@@ -56,25 +82,25 @@
     return ret
 
 
-def short_stack():
-    """Return a string summarizing the call stack."""
-    stack = inspect.stack()[:0:-1]
-    return "\n".join(["%30s : %s @%d" % (t[3],t[1],t[2]) for t in stack])
+def expensive(fn):
+    """A decorator to indicate that a method shouldn't be called more than once.
 
-
-def expensive(fn):
-    """A decorator to cache the result of an expensive operation.
-
-    Only applies to methods with no arguments.
+    Normally, this does nothing.  During testing, this raises an exception if
+    called more than once.
 
     """
-    attr = "_cache_" + fn.__name__
-    def _wrapped(self):
-        """Inner fn that checks the cache."""
-        if not hasattr(self, attr):
-            setattr(self, attr, fn(self))
-        return getattr(self, attr)
-    return _wrapped
+    if env.TESTING:
+        attr = "_once_" + fn.__name__
+
+        def _wrapped(self):
+            """Inner function that checks the cache."""
+            if hasattr(self, attr):
+                raise Exception("Shouldn't have called %s more than once" % fn.__name__)
+            setattr(self, attr, True)
+            return fn(self)
+        return _wrapped
+    else:
+        return fn
 
 
 def bool_or_none(b):
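
The rewritten expensive decorator above no longer caches results; it is now a call-once guard that only does anything under env.TESTING. A minimal sketch of its intended use (the class and method are invented for illustration):

from coverage.misc import expensive

class Analysis(object):              # hypothetical class, not from the changeset
    @expensive
    def arc_possibilities(self):
        """Callers are expected to invoke this once and keep the result;
        under env.TESTING a second call raises instead of recomputing."""
        return [(1, 2), (2, 3)]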
@@ -87,20 +113,14 @@
 
 def join_regex(regexes):
     """Combine a list of regexes into one that matches any of them."""
-    if len(regexes) > 1:
-        return "|".join(["(%s)" % r for r in regexes])
-    elif regexes:
-        return regexes[0]
-    else:
-        return ""
+    return "|".join("(?:%s)" % r for r in regexes)
 
 
 def file_be_gone(path):
     """Remove a file, and don't get annoyed if it doesn't exist."""
     try:
         os.remove(path)
-    except OSError:
-        _, e, _ = sys.exc_info()
+    except OSError as e:
         if e.errno != errno.ENOENT:
             raise
 
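
The simplified join_regex above wraps each pattern in a non-capturing group, so joining cannot renumber groups that callers rely on, and an empty list now simply joins to an empty pattern. A quick worked example (the patterns are chosen for illustration):

import re

from coverage.misc import join_regex

combined = join_regex([r"#\s*pragma: no cover", r"raise NotImplementedError"])
# combined == '(?:#\\s*pragma: no cover)|(?:raise NotImplementedError)'
assert re.search(combined, "    # pragma: no cover")
assert join_regex([]) == ""          # no regexes joins to the empty string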
@@ -108,13 +128,15 @@
 class Hasher(object):
     """Hashes Python data into md5."""
     def __init__(self):
-        self.md5 = md5()
+        self.md5 = hashlib.md5()
 
     def update(self, v):
         """Add `v` to the hash, recursively if needed."""
         self.md5.update(to_bytes(str(type(v))))
         if isinstance(v, string_class):
             self.md5.update(to_bytes(v))
+        elif isinstance(v, bytes):
+            self.md5.update(v)
         elif v is None:
             pass
         elif isinstance(v, (int, float)):
@@ -137,27 +159,48 @@
                 self.update(k)
                 self.update(a)
 
-    def digest(self):
-        """Retrieve the digest of the hash."""
-        return self.md5.digest()
+    def hexdigest(self):
+        """Retrieve the hex digest of the hash."""
+        return self.md5.hexdigest()
+
+
+def _needs_to_implement(that, func_name):
+    """Helper to raise NotImplementedError in interface stubs."""
+    if hasattr(that, "_coverage_plugin_name"):
+        thing = "Plugin"
+        name = that._coverage_plugin_name
+    else:
+        thing = "Class"
+        klass = that.__class__
+        name = "{klass.__module__}.{klass.__name__}".format(klass=klass)
+
+    raise NotImplementedError(
+        "{thing} {name!r} needs to implement {func_name}()".format(
+            thing=thing, name=name, func_name=func_name
+            )
+        )
 
 
 class CoverageException(Exception):
-    """An exception specific to Coverage."""
+    """An exception specific to coverage.py."""
     pass
 
+
 class NoSource(CoverageException):
     """We couldn't find the source for a module."""
     pass
 
+
 class NoCode(NoSource):
     """We couldn't find any code at all."""
     pass
 
+
 class NotPython(CoverageException):
     """A source file turned out not to be parsable Python."""
     pass
 
+
 class ExceptionDuringRun(CoverageException):
     """An exception happened while running customer code.
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/DebugClients/Python/coverage/monkey.py	Sat Oct 10 12:44:52 2015 +0200
@@ -0,0 +1,50 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
+"""Monkey-patching to make coverage.py work right in some cases."""
+
+import multiprocessing
+import multiprocessing.process
+import sys
+
+# An attribute that will be set on modules to indicate that they have been
+# monkey-patched.
+PATCHED_MARKER = "_coverage$patched"
+
+
+def patch_multiprocessing():
+    """Monkey-patch the multiprocessing module.
+
+    This enables coverage measurement of processes started by multiprocessing.
+    This is wildly experimental!
+
+    """
+    if hasattr(multiprocessing, PATCHED_MARKER):
+        return
+
+    if sys.version_info >= (3, 4):
+        klass = multiprocessing.process.BaseProcess
+    else:
+        klass = multiprocessing.Process
+
+    original_bootstrap = klass._bootstrap
+
+    class ProcessWithCoverage(klass):
+        """A replacement for multiprocess.Process that starts coverage."""
+        def _bootstrap(self):
+            """Wrapper around _bootstrap to start coverage."""
+            from coverage import Coverage
+            cov = Coverage(data_suffix=True)
+            cov.start()
+            try:
+                return original_bootstrap(self)
+            finally:
+                cov.stop()
+                cov.save()