Merged with default branch.

author      Detlev Offenbach <detlev@die-offenbachs.de>
date        Sun, 04 Sep 2016 13:48:29 +0200
branch      jsonrpc
changeset   5142:ffa4ef43f924
parent      5140:01484c0afbc6 (diff)
parent      5126:d28b92dabc2b (current diff)
child       5143:138759ea4419

--- a/DebugClients/Python/AsyncFile.py	Fri Sep 02 19:08:02 2016 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,304 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright (c) 2002 - 2016 Detlev Offenbach <detlev@die-offenbachs.de>
-#
-
-"""
-Module implementing an asynchronous file like socket interface for the
-debugger.
-"""
-
-import socket
-
-from DebugProtocol import EOT
-
-
-def AsyncPendingWrite(file):
-    """
-    Module function to check for data to be written.
-    
-    @param file The file object to be checked (file)
-    @return Flag indicating if there is data waiting (int)
-    """
-    try:
-        pending = file.pendingWrite()
-    except Exception:
-        pending = 0
-
-    return pending
-
-
-class AsyncFile(object):
-    """
-    Class wrapping a socket object with a file interface.
-    """
-    maxtries = 10
-    maxbuffersize = 1024 * 1024 * 4
-    
-    def __init__(self, sock, mode, name):
-        """
-        Constructor
-        
-        @param sock the socket object being wrapped
-        @param mode mode of this file (string)
-        @param name name of this file (string)
-        """
-        # Initialise the attributes.
-        self.closed = 0
-        self.sock = sock
-        self.mode = mode
-        self.name = name
-        self.nWriteErrors = 0
-        self.encoding = "utf-8"
-
-        self.wpending = u''
-
-    def __checkMode(self, mode):
-        """
-        Private method to check the mode.
-        
-        This method checks, if an operation is permitted according to
-        the mode of the file. If it is not, an IOError is raised.
-        
-        @param mode the mode to be checked (string)
-        @exception IOError raised to indicate a bad file descriptor
-        """
-        if mode != self.mode:
-            raise IOError('[Errno 9] Bad file descriptor')
-
-    def __nWrite(self, n):
-        """
-        Private method to write a specific number of pending bytes.
-        
-        @param n the number of bytes to be written (int)
-        """
-        if n:
-            try:
-                buf = "%s%s" % (self.wpending[:n], EOT)
-                try:
-                    buf = buf.encode('utf-8')
-                except (UnicodeEncodeError, UnicodeDecodeError):
-                    pass
-                self.sock.sendall(buf)
-                self.wpending = self.wpending[n:]
-                self.nWriteErrors = 0
-            except socket.error:
-                self.nWriteErrors += 1
-                if self.nWriteErrors > self.maxtries:
-                    self.wpending = u''  # delete all output
-
-    def pendingWrite(self):
-        """
-        Public method that returns the number of bytes waiting to be written.
-        
-        @return the number of bytes to be written (int)
-        """
-        return self.wpending.rfind('\n') + 1
-
-    def close(self, closeit=0):
-        """
-        Public method to close the file.
-        
-        @param closeit flag to indicate a close ordered by the debugger code
-            (boolean)
-        """
-        if closeit and not self.closed:
-            self.flush()
-            self.sock.close()
-            self.closed = 1
-
-    def flush(self):
-        """
-        Public method to write all pending bytes.
-        """
-        self.__nWrite(len(self.wpending))
-
-    def isatty(self):
-        """
-        Public method to indicate whether a tty interface is supported.
-        
-        @return always false
-        """
-        return 0
-
-    def fileno(self):
-        """
-        Public method returning the file number.
-        
-        @return file number (int)
-        """
-        try:
-            return self.sock.fileno()
-        except socket.error:
-            return -1
-
-    def read_p(self, size=-1):
-        """
-        Public method to read bytes from this file.
-        
-        @param size maximum number of bytes to be read (int)
-        @return the bytes read (any)
-        """
-        self.__checkMode('r')
-
-        if size < 0:
-            size = 20000
-
-        return self.sock.recv(size).decode('utf8')
-
-    def read(self, size=-1):
-        """
-        Public method to read bytes from this file.
-        
-        @param size maximum number of bytes to be read (int)
-        @return the bytes read (any)
-        """
-        self.__checkMode('r')
-
-        buf = raw_input()
-        if size >= 0:
-            buf = buf[:size]
-        return buf
-
-    def readline_p(self, size=-1):
-        """
-        Public method to read a line from this file.
-        
-        <b>Note</b>: This method will not block and may return
-        only a part of a line if that is all that is available.
-        
-        @param size maximum number of bytes to be read (int)
-        @return one line of text up to size bytes (string)
-        """
-        self.__checkMode('r')
-
-        if size < 0:
-            size = 20000
-
-        # The integration of the debugger client event loop and the connection
-        # to the debugger relies on the two lines of the debugger command being
-        # delivered as two separate events.  Therefore we make sure we only
-        # read a line at a time.
-        line = self.sock.recv(size, socket.MSG_PEEK)
-
-        eol = line.find('\n')
-
-        if eol >= 0:
-            size = eol + 1
-        else:
-            size = len(line)
-
-        # Now we know how big the line is, read it for real.
-        return self.sock.recv(size).decode('utf8')
-
-    def readlines(self, sizehint=-1):
-        """
-        Public method to read all lines from this file.
-        
-        @param sizehint hint of the number of bytes to be read (int)
-        @return list of lines read (list of strings)
-        """
-        self.__checkMode('r')
-
-        lines = []
-        room = sizehint
-
-        line = self.readline_p(room)
-        linelen = len(line)
-
-        while linelen > 0:
-            lines.append(line)
-
-            if sizehint >= 0:
-                room = room - linelen
-
-                if room <= 0:
-                    break
-
-            line = self.readline_p(room)
-            linelen = len(line)
-
-        return lines
-
-    def readline(self, sizehint=-1):
-        """
-        Public method to read one line from this file.
-        
-        @param sizehint hint of the number of bytes to be read (int)
-        @return one line read (string)
-        """
-        self.__checkMode('r')
-
-        line = raw_input() + '\n'
-        if sizehint >= 0:
-            line = line[:sizehint]
-        return line
-        
-    def seek(self, offset, whence=0):
-        """
-        Public method to move the filepointer.
-        
-        @param offset offset to seek for
-        @param whence where to seek from
-        @exception IOError This method is not supported and always raises an
-        IOError.
-        """
-        raise IOError('[Errno 29] Illegal seek')
-
-    def tell(self):
-        """
-        Public method to get the filepointer position.
-        
-        @exception IOError This method is not supported and always raises an
-        IOError.
-        """
-        raise IOError('[Errno 29] Illegal seek')
-
-    def truncate(self, size=-1):
-        """
-        Public method to truncate the file.
-        
-        @param size size to truncate to (integer)
-        @exception IOError This method is not supported and always raises an
-        IOError.
-        """
-        raise IOError('[Errno 29] Illegal seek')
-
-    def write(self, s):
-        """
-        Public method to write a string to the file.
-        
-        @param s bytes to be written (string)
-        @exception socket.error raised to indicate too many send attempts
-        """
-        self.__checkMode('w')
-        tries = 0
-        if not self.wpending:
-            self.wpending = s
-        elif type(self.wpending) != type(s) or \
-                len(self.wpending) + len(s) > self.maxbuffersize:
-            # flush wpending so that different string types are not
-            # concatenated
-            while self.wpending:
-                # if we have a persistent error in sending the data, an
-                # exception will be raised in __nWrite
-                self.flush()
-                tries += 1
-                if tries > self.maxtries:
-                    raise socket.error("Too many attempts to send data")
-            self.wpending = s
-        else:
-            self.wpending += s
-        self.__nWrite(self.pendingWrite())
-
-    def writelines(self, list):
-        """
-        Public method to write a list of strings to the file.
-        
-        @param list the list to be written (list of string)
-        """
-        map(self.write, list)
-
-#
-# eflag: FileType = Python2
-# eflag: noqa = M601, M702
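
The removed AsyncFile class buffers writes and only sends data up to the last complete newline, appending the protocol terminator to each chunk. Below is a minimal sketch of that line-oriented buffering idea, independent of eric6 (the EOT value and the class name are illustrative, not part of the real protocol):

    EOT = ">EOT<\n"  # stand-in for the terminator imported from DebugProtocol

    class LineBufferedWriter(object):
        """Buffer writes and transmit only complete lines plus a terminator."""

        def __init__(self, sock):
            self.sock = sock        # a connected socket object
            self.pending = u''      # text that has not been sent yet

        def write(self, s):
            self.pending += s
            # transmit everything up to and including the last newline
            self._send(self.pending.rfind('\n') + 1)

        def flush(self):
            self._send(len(self.pending))

        def _send(self, n):
            if n:
                data = (self.pending[:n] + EOT).encode('utf-8')
                self.sock.sendall(data)
                self.pending = self.pending[n:]

Writes that do not end in a newline stay buffered until flush() is called, which mirrors how pendingWrite() and __nWrite() cooperate above.
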
--- a/DebugClients/Python/AsyncIO.py	Fri Sep 02 19:08:02 2016 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,88 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright (c) 2002 - 2016 Detlev Offenbach <detlev@die-offenbachs.de>
-#
-
-"""
-Module implementing a base class of an asynchronous interface for the debugger.
-"""
-
-
-class AsyncIO(object):
-    """
-    Class implementing asynchronous reading and writing.
-    """
-    def __init__(self):
-        """
-        Constructor
-        """
-        # There is no connection yet.
-        self.disconnect()
-
-    def disconnect(self):
-        """
-        Public method to disconnect any current connection.
-        """
-        self.readfd = None
-        self.writefd = None
-
-    def setDescriptors(self, rfd, wfd):
-        """
-        Public method called to set the descriptors for the connection.
-        
-        @param rfd file descriptor of the input file (int)
-        @param wfd file descriptor of the output file (int)
-        """
-        self.rbuf = ''
-        self.readfd = rfd
-
-        self.wbuf = ''
-        self.writefd = wfd
-
-    def readReady(self, fd):
-        """
-        Public method called when there is data ready to be read.
-        
-        @param fd file descriptor of the file that has data to be read (int)
-        """
-        try:
-            got = self.readfd.readline_p()
-        except Exception:
-            return
-
-        if len(got) == 0:
-            self.sessionClose()
-            return
-
-        self.rbuf = self.rbuf + got
-
-        # Call handleLine for the line if it is complete.
-        eol = self.rbuf.find('\n')
-
-        while eol >= 0:
-            s = self.rbuf[:eol + 1]
-            self.rbuf = self.rbuf[eol + 1:]
-            self.handleLine(s)
-            eol = self.rbuf.find('\n')
-
-    def writeReady(self, fd):
-        """
-        Public method called when we are ready to write data.
-        
-        @param fd file descriptor of the file that has data to be written (int)
-        """
-        self.writefd.write(self.wbuf)
-        self.writefd.flush()
-        self.wbuf = ''
-
-    def write(self, s):
-        """
-        Public method to write a string.
-        
-        @param s the data to be written (string)
-        """
-        self.wbuf = self.wbuf + s
-
-#
-# eflag: FileType = Python2
-# eflag: noqa = M601, M702
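
AsyncIO.readReady() above accumulates partial reads in a buffer and calls handleLine() once per complete line. The same accumulation pattern in isolation (the class and callback names are made up for this example):

    class LineAssembler(object):
        """Collect arbitrary chunks of text and emit complete lines."""

        def __init__(self, handleLine):
            self.buf = ''
            self.handleLine = handleLine   # called once per complete line

        def feed(self, chunk):
            self.buf += chunk
            eol = self.buf.find('\n')
            while eol >= 0:
                line, self.buf = self.buf[:eol + 1], self.buf[eol + 1:]
                self.handleLine(line)
                eol = self.buf.find('\n')

    # LineAssembler(handler).feed("a\nb") calls handler("a\n") right away
    # and keeps "b" buffered until the rest of that line arrives.
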
--- a/DebugClients/Python/DCTestResult.py	Fri Sep 02 19:08:02 2016 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,131 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright (c) 2003 - 2016 Detlev Offenbach <detlev@die-offenbachs.de>
-#
-
-"""
-Module implementing a TestResult derivative for the eric6 debugger.
-"""
-
-import select
-from unittest import TestResult
-
-
-from DebugProtocol import ResponseUTTestFailed, ResponseUTTestErrored, \
-    ResponseUTStartTest, ResponseUTStopTest, ResponseUTTestSkipped, \
-    ResponseUTTestFailedExpected, ResponseUTTestSucceededUnexpected
-
-
-class DCTestResult(TestResult):
-    """
-    A TestResult derivative to work with eric6's debug client.
-    
-    For more details see unittest.py of the standard python distribution.
-    """
-    def __init__(self, parent):
-        """
-        Constructor
-        
-        @param parent The parent widget.
-        """
-        TestResult.__init__(self)
-        self.parent = parent
-        
-    def addFailure(self, test, err):
-        """
-        Public method called if a test failed.
-        
-        @param test Reference to the test object
-        @param err The error traceback
-        """
-        TestResult.addFailure(self, test, err)
-        tracebackLines = self._exc_info_to_string(err, test)
-        self.parent.write(
-            '%s%s\n' % (
-                ResponseUTTestFailed,
-                unicode((unicode(test), tracebackLines, test.id()))))
-        
-    def addError(self, test, err):
-        """
-        Public method called if a test errored.
-        
-        @param test Reference to the test object
-        @param err The error traceback
-        """
-        TestResult.addError(self, test, err)
-        tracebackLines = self._exc_info_to_string(err, test)
-        self.parent.write(
-            '%s%s\n' % (
-                ResponseUTTestErrored,
-                unicode((unicode(test), tracebackLines, test.id()))))
-        
-    def addSkip(self, test, reason):
-        """
-        Public method called if a test was skipped.
-        
-        @param test reference to the test object
-        @param reason reason for skipping the test (string)
-        """
-        TestResult.addSkip(self, test, reason)
-        self.parent.write(
-            '%s%s\n' % (
-                ResponseUTTestSkipped,
-                str((str(test), reason, test.id()))))
-        
-    def addExpectedFailure(self, test, err):
-        """
-        Public method called if a test failed as expected.
-        
-        @param test reference to the test object
-        @param err error traceback
-        """
-        TestResult.addExpectedFailure(self, test, err)
-        tracebackLines = self._exc_info_to_string(err, test)
-        self.parent.write(
-            '%s%s\n' % (
-                ResponseUTTestFailedExpected,
-                str((str(test), tracebackLines, test.id()))))
-        
-    def addUnexpectedSuccess(self, test):
-        """
-        Public method called if a test succeeded unexpectedly.
-        
-        @param test reference to the test object
-        """
-        TestResult.addUnexpectedSuccess(self, test)
-        self.parent.write(
-            '%s%s\n' % (
-                ResponseUTTestSucceededUnexpected,
-                str((str(test), test.id()))))
-        
-    def startTest(self, test):
-        """
-        Public method called at the start of a test.
-        
-        @param test Reference to the test object
-        """
-        TestResult.startTest(self, test)
-        self.parent.write(
-            '%s%s\n' % (
-                ResponseUTStartTest,
-                unicode((unicode(test), test.shortDescription()))))
-
-    def stopTest(self, test):
-        """
-        Public method called at the end of a test.
-        
-        @param test Reference to the test object
-        """
-        TestResult.stopTest(self, test)
-        self.parent.write('%s\n' % ResponseUTStopTest)
-        
-        # ensure that pending input is processed
-        rrdy, wrdy, xrdy = select.select([self.parent.readstream], [], [],
-                                         0.01)
-
-        if self.parent.readstream in rrdy:
-            self.parent.readReady(self.parent.readstream.fileno())
-
-#
-# eflag: FileType = Python2
-# eflag: noqa = M601, M702
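
DCTestResult simply overrides the unittest.TestResult hooks and writes one protocol line per event so the IDE can follow the test run. A stripped-down sketch of the same idea that streams plain text instead of the eric6 protocol tokens (StreamingTestResult and its message format are illustrative only):

    import sys
    import unittest

    class StreamingTestResult(unittest.TestResult):
        """Report test progress line by line to a writable stream."""

        def __init__(self, stream):
            unittest.TestResult.__init__(self)
            self.stream = stream

        def startTest(self, test):
            unittest.TestResult.startTest(self, test)
            self.stream.write('start  %s\n' % test.id())

        def addFailure(self, test, err):
            unittest.TestResult.addFailure(self, test, err)
            self.stream.write('failed %s\n%s' %
                              (test.id(), self._exc_info_to_string(err, test)))

    # suite.run(StreamingTestResult(sys.stdout)) streams results as they happen.
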
--- a/DebugClients/Python/DebugBase.py	Fri Sep 02 19:08:02 2016 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,860 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright (c) 2002 - 2016 Detlev Offenbach <detlev@die-offenbachs.de>
-#
-
-"""
-Module implementing the debug base class.
-"""
-
-import sys
-import bdb
-import os
-import types
-import atexit
-import inspect
-import ctypes
-
-from DebugProtocol import ResponseClearWatch, ResponseClearBreak, \
-    ResponseLine, ResponseSyntax, ResponseException, CallTrace
-
-gRecursionLimit = 64
-
-
-def printerr(s):
-    """
-    Module function used for debugging the debug client.
-    
-    @param s data to be printed
-    """
-    sys.__stderr__.write('%s\n' % unicode(s))
-    sys.__stderr__.flush()
-
-
-def setRecursionLimit(limit):
-    """
-    Module function to set the recursion limit.
-    
-    @param limit recursion limit (integer)
-    """
-    global gRecursionLimit
-    gRecursionLimit = limit
-
-
-class DebugBase(bdb.Bdb):
-    """
-    Class implementing base class of the debugger.
-
-    Provides simple wrapper methods around bdb for the 'owning' client to
-    call to step etc.
-    """
-    def __init__(self, dbgClient):
-        """
-        Constructor
-        
-        @param dbgClient the owning client
-        """
-        bdb.Bdb.__init__(self)
-
-        self._dbgClient = dbgClient
-        self._mainThread = 1
-        
-        self.breaks = self._dbgClient.breakpoints
-        
-        self.__event = ""
-        self.__isBroken = ""
-        self.cFrame = None
-        
-        # current frame we are at
-        self.currentFrame = None
-        
-        # frame that we are stepping in, can be different than currentFrame
-        self.stepFrame = None
-        
-        # provide a hook to perform a hard breakpoint
-        # Use it like this:
-        # if hasattr(sys, 'breakpoint'): sys.breakpoint()
-        sys.breakpoint = self.set_trace
-        
-        # initialize parent
-        bdb.Bdb.reset(self)
-        
-        self.__recursionDepth = -1
-        self.setRecursionDepth(inspect.currentframe())
-    
-    def getCurrentFrame(self):
-        """
-        Public method to return the current frame.
-        
-        @return the current frame
-        """
-        return self.currentFrame
-    
-    def getFrameLocals(self, frmnr=0):
-        """
-        Public method to return the locals dictionary of the current frame
-        or a frame below.
-        
-        @keyparam frmnr distance of frame to get locals dictionary of. 0 is
-            the current frame (int)
-        @return locals dictionary of the frame
-        """
-        f = self.currentFrame
-        while f is not None and frmnr > 0:
-            f = f.f_back
-            frmnr -= 1
-        return f.f_locals
-    
-    def storeFrameLocals(self, frmnr=0):
-        """
-        Public method to store the locals into the frame, so an access to
-        frame.f_locals returns the last data.
-        
-        @keyparam frmnr distance of frame to store locals dictionary to. 0 is
-            the current frame (int)
-        """
-        cf = self.currentFrame
-        while cf is not None and frmnr > 0:
-            cf = cf.f_back
-            frmnr -= 1
-        ctypes.pythonapi.PyFrame_LocalsToFast(
-            ctypes.py_object(cf),
-            ctypes.c_int(0))
-    
-    def step(self, traceMode):
-        """
-        Public method to perform a step operation in this thread.
-        
-        @param traceMode If it is non-zero, then the step is a step into,
-              otherwise it is a step over.
-        """
-        self.stepFrame = self.currentFrame
-        
-        if traceMode:
-            self.currentFrame = None
-            self.set_step()
-        else:
-            self.set_next(self.currentFrame)
-    
-    def stepOut(self):
-        """
-        Public method to perform a step out of the current call.
-        """
-        self.stepFrame = self.currentFrame
-        self.set_return(self.currentFrame)
-    
-    def go(self, special):
-        """
-        Public method to resume the thread.
-
-        It resumes the thread stopping only at breakpoints or exceptions.
-        
-        @param special flag indicating a special continue operation
-        """
-        self.currentFrame = None
-        self.set_continue(special)
-    
-    def setRecursionDepth(self, frame):
-        """
-        Public method to determine the current recursion depth.
-        
-        @param frame The current stack frame.
-        """
-        self.__recursionDepth = 0
-        while frame is not None:
-            self.__recursionDepth += 1
-            frame = frame.f_back
-    
-    def profile(self, frame, event, arg):
-        """
-        Public method used to trace some stuff independent of the debugger
-        trace function.
-        
-        @param frame current stack frame.
-        @param event trace event (string)
-        @param arg arguments
-        @exception RuntimeError raised to indicate too many recursions
-        """
-        if event == 'return':
-            self.cFrame = frame.f_back
-            self.__recursionDepth -= 1
-            self.__sendCallTrace(event, frame, self.cFrame)
-        elif event == 'call':
-            self.__sendCallTrace(event, self.cFrame, frame)
-            self.cFrame = frame
-            self.__recursionDepth += 1
-            if self.__recursionDepth > gRecursionLimit:
-                raise RuntimeError(
-                    'maximum recursion depth exceeded\n'
-                    '(offending frame is two down the stack)')
-    
-    def __sendCallTrace(self, event, fromFrame, toFrame):
-        """
-        Private method to send a call/return trace.
-        
-        @param event trace event (string)
-        @param fromFrame originating frame (frame)
-        @param toFrame destination frame (frame)
-        """
-        if self._dbgClient.callTraceEnabled:
-            if not self.__skip_it(fromFrame) and not self.__skip_it(toFrame):
-                if event in ["call", "return"]:
-                    fr = fromFrame
-                    fromStr = "%s:%s:%s" % (
-                        self._dbgClient.absPath(self.fix_frame_filename(fr)),
-                        fr.f_lineno,
-                        fr.f_code.co_name)
-                    fr = toFrame
-                    toStr = "%s:%s:%s" % (
-                        self._dbgClient.absPath(self.fix_frame_filename(fr)),
-                        fr.f_lineno,
-                        fr.f_code.co_name)
-                    self._dbgClient.write("%s%s@@%s@@%s\n" % (
-                        CallTrace, event[0], fromStr, toStr))
-    
-    def trace_dispatch(self, frame, event, arg):
-        """
-        Public method reimplemented from bdb.py to do some special things.
-        
-        This specialty is to check the connection to the debug server
-        for new events (i.e. new breakpoints) while we are going through
-        the code.
-        
-        @param frame The current stack frame.
-        @param event The trace event (string)
-        @param arg The arguments
-        @return local trace function
-        """
-        if self.quitting:
-            return  # None
-        
-        # give the client a chance to push through new break points.
-        self._dbgClient.eventPoll()
-        
-        self.__event = event
-        self.__isBroken = False
-        
-        if event == 'line':
-            return self.dispatch_line(frame)
-        if event == 'call':
-            return self.dispatch_call(frame, arg)
-        if event == 'return':
-            return self.dispatch_return(frame, arg)
-        if event == 'exception':
-            return self.dispatch_exception(frame, arg)
-        if event == 'c_call':
-            return self.trace_dispatch
-        if event == 'c_exception':
-            return self.trace_dispatch
-        if event == 'c_return':
-            return self.trace_dispatch
-        print 'DebugBase.trace_dispatch: unknown debugging event:', repr(event) # __IGNORE_WARNING__
-        return self.trace_dispatch
-
-    def dispatch_line(self, frame):
-        """
-        Public method reimplemented from bdb.py to do some special things.
-        
-        This speciality is to check the connection to the debug server
-        for new events (i.e. new breakpoints) while we are going through
-        the code.
-        
-        @param frame The current stack frame.
-        @return local trace function
-        @exception bdb.BdbQuit raised to indicate the end of the debug session
-        """
-        if self.stop_here(frame) or self.break_here(frame):
-            self.user_line(frame)
-            if self.quitting:
-                raise bdb.BdbQuit
-        return self.trace_dispatch
-
-    def dispatch_return(self, frame, arg):
-        """
-        Public method reimplemented from bdb.py to handle passive mode cleanly.
-        
-        @param frame The current stack frame.
-        @param arg The arguments
-        @return local trace function
-        @exception bdb.BdbQuit raised to indicate the end of the debug session
-        """
-        if self.stop_here(frame) or frame == self.returnframe:
-            self.user_return(frame, arg)
-            if self.quitting and not self._dbgClient.passive:
-                raise bdb.BdbQuit
-        return self.trace_dispatch
-
-    def dispatch_exception(self, frame, arg):
-        """
-        Public method reimplemented from bdb.py to always call user_exception.
-        
-        @param frame The current stack frame.
-        @param arg The arguments
-        @return local trace function
-        @exception bdb.BdbQuit raised to indicate the end of the debug session
-        """
-        if not self.__skip_it(frame):
-            self.user_exception(frame, arg)
-            if self.quitting:
-                raise bdb.BdbQuit
-        return self.trace_dispatch
-
-    def set_trace(self, frame=None):
-        """
-        Public method reimplemented from bdb.py to do some special setup.
-        
-        @param frame frame to start debugging from
-        """
-        bdb.Bdb.set_trace(self, frame)
-        sys.setprofile(self.profile)
-    
-    def set_continue(self, special):
-        """
-        Public method reimplemented from bdb.py to always get informed of
-        exceptions.
-        
-        @param special flag indicating a special continue operation
-        """
-        # Modified version of the one found in bdb.py
-        # Here we only set a new stop frame if it is a normal continue.
-        if not special:
-            self.stopframe = self.botframe
-        self.returnframe = None
-        self.quitting = 0
-
-    def set_quit(self):
-        """
-        Public method to quit.
-        
-        It wraps call to bdb to clear the current frame properly.
-        """
-        self.currentFrame = None
-        sys.setprofile(None)
-        bdb.Bdb.set_quit(self)
-    
-    def fix_frame_filename(self, frame):
-        """
-        Public method used to fixup the filename for a given frame.
-        
-        The logic employed here is that if a module was loaded
-        from a .pyc file, then the correct .py to operate with
-        should be in the same path as the .pyc. The reason this
-        logic is needed is that when a .pyc file is generated, the
-        filename embedded and thus what is readable in the code object
-        of the frame object is the fully qualified filepath when the
-        pyc is generated. If files are moved from machine to machine
-        this can break debugging as the .pyc will refer to the .py
-        on the original machine. Another case might be sharing
-        code over a network... This logic deals with that.
-        
-        @param frame the frame object
-        @return fixed up file name (string)
-        """
-        # get module name from __file__
-        if '__file__' in frame.f_globals and \
-           frame.f_globals['__file__'] and \
-           frame.f_globals['__file__'] == frame.f_code.co_filename:
-            root, ext = os.path.splitext(frame.f_globals['__file__'])
-            if ext == '.pyc' or ext == '.py' or ext == '.pyo':
-                fixedName = root + '.py'
-                if os.path.exists(fixedName):
-                    return fixedName
-
-        return frame.f_code.co_filename
-
-    def set_watch(self, cond, temporary=0):
-        """
-        Public method to set a watch expression.
-        
-        @param cond expression of the watch expression (string)
-        @param temporary flag indicating a temporary watch expression (boolean)
-        """
-        bp = bdb.Breakpoint("Watch", 0, temporary, cond)
-        if cond.endswith('??created??') or cond.endswith('??changed??'):
-            bp.condition, bp.special = cond.split()
-        else:
-            bp.condition = cond
-            bp.special = ""
-        bp.values = {}
-        if "Watch" not in self.breaks:
-            self.breaks["Watch"] = 1
-        else:
-            self.breaks["Watch"] += 1
-    
-    def clear_watch(self, cond):
-        """
-        Public method to clear a watch expression.
-        
-        @param cond expression of the watch expression to be cleared (string)
-        """
-        try:
-            possibles = bdb.Breakpoint.bplist["Watch", 0]
-            for i in range(0, len(possibles)):
-                b = possibles[i]
-                if b.cond == cond:
-                    b.deleteMe()
-                    self.breaks["Watch"] -= 1
-                    if self.breaks["Watch"] == 0:
-                        del self.breaks["Watch"]
-                    break
-        except KeyError:
-            pass
-    
-    def get_watch(self, cond):
-        """
-        Public method to get a watch expression.
-        
-        @param cond expression of the watch expression to be cleared (string)
-        @return reference to the watch point
-        """
-        possibles = bdb.Breakpoint.bplist["Watch", 0]
-        for i in range(0, len(possibles)):
-            b = possibles[i]
-            if b.cond == cond:
-                return b
-    
-    def __do_clearWatch(self, cond):
-        """
-        Private method called to clear a temporary watch expression.
-        
-        @param cond expression of the watch expression to be cleared (string)
-        """
-        self.clear_watch(cond)
-        self._dbgClient.write('%s%s\n' % (ResponseClearWatch, cond))
-
-    def __effective(self, frame):
-        """
-        Private method to determine, if a watch expression is effective.
-        
-        @param frame the current execution frame
-        @return tuple of watch expression and a flag to indicate, that a
-            temporary watch expression may be deleted (bdb.Breakpoint, boolean)
-        """
-        possibles = bdb.Breakpoint.bplist["Watch", 0]
-        for i in range(0, len(possibles)):
-            b = possibles[i]
-            if b.enabled == 0:
-                continue
-            if not b.cond:
-                # watch expression without expression shouldn't occur,
-                # just ignore it
-                continue
-            try:
-                val = eval(b.condition, frame.f_globals, frame.f_locals)
-                if b.special:
-                    if b.special == '??created??':
-                        if b.values[frame][0] == 0:
-                            b.values[frame][0] = 1
-                            b.values[frame][1] = val
-                            return (b, 1)
-                        else:
-                            continue
-                    b.values[frame][0] = 1
-                    if b.special == '??changed??':
-                        if b.values[frame][1] != val:
-                            b.values[frame][1] = val
-                            if b.values[frame][2] > 0:
-                                b.values[frame][2] -= 1
-                                continue
-                            else:
-                                return (b, 1)
-                        else:
-                            continue
-                    continue
-                if val:
-                    if b.ignore > 0:
-                        b.ignore -= 1
-                        continue
-                    else:
-                        return (b, 1)
-            except Exception:
-                if b.special:
-                    try:
-                        b.values[frame][0] = 0
-                    except KeyError:
-                        b.values[frame] = [0, None, b.ignore]
-                continue
-        return (None, None)
-    
-    def break_here(self, frame):
-        """
-        Public method reimplemented from bdb.py to fix the filename from the
-        frame.
-        
-        See fix_frame_filename for more info.
-        
-        @param frame the frame object
-        @return flag indicating the break status (boolean)
-        """
-        filename = self.canonic(self.fix_frame_filename(frame))
-        if filename not in self.breaks and "Watch" not in self.breaks:
-            return 0
-        
-        if filename in self.breaks:
-            lineno = frame.f_lineno
-            if lineno in self.breaks[filename]:
-                # flag says ok to delete temp. bp
-                (bp, flag) = bdb.effective(filename, lineno, frame)
-                if bp:
-                    self.currentbp = bp.number
-                    if (flag and bp.temporary):
-                        self.__do_clear(filename, lineno)
-                    return 1
-        
-        if "Watch" in self.breaks:
-            # flag says ok to delete temp. bp
-            (bp, flag) = self.__effective(frame)
-            if bp:
-                self.currentbp = bp.number
-                if (flag and bp.temporary):
-                    self.__do_clearWatch(bp.cond)
-                return 1
-        
-        return 0
-
-    def break_anywhere(self, frame):
-        """
-        Public method reimplemented from bdb.py to do some special things.
-        
-        This speciality is to fix the filename from the frame
-        (see fix_frame_filename for more info).
-        
-        @param frame the frame object
-        @return flag indicating the break status (boolean)
-        """
-        return \
-            self.canonic(self.fix_frame_filename(frame)) in self.breaks or \
-            ("Watch" in self.breaks and self.breaks["Watch"])
-
-    def get_break(self, filename, lineno):
-        """
-        Public method reimplemented from bdb.py to get the first breakpoint of
-        a particular line.
-        
-        Because eric6 supports only one breakpoint per line, this overwritten
-        method will return this one and only breakpoint.
-        
-        @param filename filename of the bp to retrieve (string)
-        @param lineno linenumber of the bp to retrieve (integer)
-        @return breakpoint or None, if there is no bp
-        """
-        filename = self.canonic(filename)
-        return filename in self.breaks and \
-            lineno in self.breaks[filename] and \
-            bdb.Breakpoint.bplist[filename, lineno][0] or None
-    
-    def __do_clear(self, filename, lineno):
-        """
-        Private method called to clear a temporary breakpoint.
-        
-        @param filename name of the file the bp belongs to
-        @param lineno linenumber of the bp
-        """
-        self.clear_break(filename, lineno)
-        self._dbgClient.write('%s%s,%d\n' % (ResponseClearBreak, filename,
-                                             lineno))
-
-    def getStack(self):
-        """
-        Public method to get the stack.
-        
-        @return list of lists with file name (string), line number (integer)
-            and function name (string)
-        """
-        fr = self.cFrame
-        stack = []
-        while fr is not None:
-            fname = self._dbgClient.absPath(self.fix_frame_filename(fr))
-            if not fname.startswith("<"):
-                fline = fr.f_lineno
-                ffunc = fr.f_code.co_name
-                
-                if ffunc == '?':
-                    ffunc = ''
-            
-                if ffunc and not ffunc.startswith("<"):
-                    argInfo = inspect.getargvalues(fr)
-                    try:
-                        fargs = inspect.formatargvalues(argInfo[0], argInfo[1],
-                                                        argInfo[2], argInfo[3])
-                    except Exception:
-                        fargs = ""
-                else:
-                    fargs = ""
-                
-            stack.append([fname, fline, ffunc, fargs])
-            
-            if fr == self._dbgClient.mainFrame:
-                fr = None
-            else:
-                fr = fr.f_back
-        
-        return stack
-    
-    def user_line(self, frame):
-        """
-        Public method reimplemented to handle the program about to execute a
-        particular line.
-        
-        @param frame the frame object
-        """
-        line = frame.f_lineno
-
-        # We never stop on line 0.
-        if line == 0:
-            return
-
-        fn = self._dbgClient.absPath(self.fix_frame_filename(frame))
-
-        # See if we are skipping at the start of a newly loaded program.
-        if self._dbgClient.mainFrame is None:
-            if fn != self._dbgClient.getRunning():
-                return
-            fr = frame
-            while (fr is not None and
-                    fr.f_code != self._dbgClient.handleLine.func_code):
-                self._dbgClient.mainFrame = fr
-                fr = fr.f_back
-
-        self.currentFrame = frame
-        
-        fr = frame
-        stack = []
-        while fr is not None:
-            # Reset the trace function so we can be sure
-            # to trace all functions up the stack... This gets around
-            # problems where an exception/breakpoint has occurred
-            # but we had disabled tracing along the way via a None
-            # return from dispatch_call
-            fr.f_trace = self.trace_dispatch
-            fname = self._dbgClient.absPath(self.fix_frame_filename(fr))
-            if not fname.startswith("<"):
-                fline = fr.f_lineno
-                ffunc = fr.f_code.co_name
-                
-                if ffunc == '?':
-                    ffunc = ''
-                
-                if ffunc and not ffunc.startswith("<"):
-                    argInfo = inspect.getargvalues(fr)
-                    try:
-                        fargs = inspect.formatargvalues(argInfo[0], argInfo[1],
-                                                        argInfo[2], argInfo[3])
-                    except Exception:
-                        fargs = ""
-                else:
-                    fargs = ""
-                
-            stack.append([fname, fline, ffunc, fargs])
-            
-            if fr == self._dbgClient.mainFrame:
-                fr = None
-            else:
-                fr = fr.f_back
-        
-        self.__isBroken = True
-        
-        self._dbgClient.write('%s%s\n' % (ResponseLine, unicode(stack)))
-        self._dbgClient.eventLoop()
-
-    def user_exception(self, frame, (exctype, excval, exctb), unhandled=0):
-        """
-        Public method reimplemented to report an exception to the debug server.
-        
-        @param frame the frame object
-        @param exctype the type of the exception
-        @param excval data about the exception
-        @param exctb traceback for the exception
-        @param unhandled flag indicating an uncaught exception
-        """
-        if exctype in [GeneratorExit, StopIteration]:
-            # ignore these
-            return
-        
-        if exctype in [SystemExit, bdb.BdbQuit]:
-            atexit._run_exitfuncs()
-            if excval is None:
-                excval = 0
-            elif isinstance(excval, (unicode, str)):
-                self._dbgClient.write(excval)
-                excval = 1
-            if isinstance(excval, int):
-                self._dbgClient.progTerminated(excval)
-            else:
-                self._dbgClient.progTerminated(excval.code)
-            return
-        
-        if exctype in [SyntaxError, IndentationError]:
-            try:
-                message, (filename, linenr, charnr, text) = excval
-            except ValueError:
-                exclist = []
-                realSyntaxError = True
-            else:
-                exclist = [message, [filename, linenr, charnr]]
-                realSyntaxError = os.path.exists(filename)
-            
-            if realSyntaxError:
-                self._dbgClient.write("%s%s\n" % (ResponseSyntax,
-                                                  unicode(exclist)))
-                self._dbgClient.eventLoop()
-                return
-        
-        if type(exctype) in [types.ClassType,   # Python up to 2.4
-                             types.TypeType]:   # Python 2.5+
-            exctype = exctype.__name__
-        
-        if excval is None:
-            excval = ''
-        
-        if unhandled:
-            exctypetxt = "unhandled %s" % unicode(exctype)
-        else:
-            exctypetxt = unicode(exctype)
-        try:
-            exclist = [exctypetxt,
-                       unicode(excval).encode(self._dbgClient.getCoding())]
-        except TypeError:
-            exclist = [exctypetxt, str(excval)]
-        
-        if exctb:
-            frlist = self.__extract_stack(exctb)
-            frlist.reverse()
-            
-            self.currentFrame = frlist[0]
-            
-            for fr in frlist:
-                filename = self._dbgClient.absPath(self.fix_frame_filename(fr))
-                
-                if os.path.basename(filename).startswith("DebugClient") or \
-                   os.path.basename(filename) == "bdb.py":
-                    break
-                
-                linenr = fr.f_lineno
-                ffunc = fr.f_code.co_name
-                
-                if ffunc == '?':
-                    ffunc = ''
-                
-                if ffunc and not ffunc.startswith("<"):
-                    argInfo = inspect.getargvalues(fr)
-                    try:
-                        fargs = inspect.formatargvalues(argInfo[0], argInfo[1],
-                                                        argInfo[2], argInfo[3])
-                    except Exception:
-                        fargs = ""
-                else:
-                    fargs = ""
-                
-                exclist.append([filename, linenr, ffunc, fargs])
-        
-        self._dbgClient.write("%s%s\n" % (ResponseException, unicode(exclist)))
-        
-        if exctb is None:
-            return
-        
-        self._dbgClient.eventLoop()
-    
-    def __extract_stack(self, exctb):
-        """
-        Private member to return a list of stack frames.
-        
-        @param exctb exception traceback
-        @return list of stack frames
-        """
-        tb = exctb
-        stack = []
-        while tb is not None:
-            stack.append(tb.tb_frame)
-            tb = tb.tb_next
-        tb = None
-        return stack
-
-    def user_return(self, frame, retval):
-        """
-        Public method reimplemented to report program termination to the
-        debug server.
-        
-        @param frame the frame object
-        @param retval the return value of the program
-        """
-        # The program has finished if we have just left the first frame.
-        if frame == self._dbgClient.mainFrame and \
-                self._mainThread:
-            atexit._run_exitfuncs()
-            self._dbgClient.progTerminated(retval)
-        elif frame is not self.stepFrame:
-            self.stepFrame = None
-            self.user_line(frame)
-
-    def stop_here(self, frame):
-        """
-        Public method reimplemented to filter out debugger files.
-        
-        Tracing is turned off for files that are part of the
-        debugger that are called from the application being debugged.
-        
-        @param frame the frame object
-        @return flag indicating whether the debugger should stop here
-        """
-        if self.__skip_it(frame):
-            return 0
-        return bdb.Bdb.stop_here(self, frame)
-
-    def __skip_it(self, frame):
-        """
-        Private method to filter out debugger files.
-        
-        Tracing is turned off for files that are part of the
-        debugger that are called from the application being debugged.
-        
-        @param frame the frame object
-        @return flag indicating whether the debugger should skip this frame
-        """
-        if frame is None:
-            return 1
-        
-        fn = self.fix_frame_filename(frame)
-
-        # Eliminate things like <string> and <stdin>.
-        if fn[0] == '<':
-            return 1
-
-        #XXX - think of a better way to do this.  It's only a convenience for
-        #debugging the debugger - when the debugger code is in the current
-        #directory.
-        if os.path.basename(fn) in [
-            'AsyncFile.py', 'AsyncIO.py',
-            'DebugConfig.py', 'DCTestResult.py',
-            'DebugBase.py', 'DebugClientBase.py',
-            'DebugClientCapabilities.py', 'DebugClient.py',
-            'DebugClientThreads.py', 'DebugProtocol.py',
-            'DebugThread.py', 'FlexCompleter.py',
-            'PyProfile.py'] or \
-           os.path.dirname(fn).endswith("coverage"):
-            return 1
-
-        if self._dbgClient.shouldSkip(fn):
-            return 1
-        
-        return 0
-    
-    def isBroken(self):
-        """
-        Public method to return the broken state of the debugger.
-        
-        @return flag indicating the broken state (boolean)
-        """
-        return self.__isBroken
-    
-    def getEvent(self):
-        """
-        Public method to return the last debugger event.
-        
-        @return last debugger event (string)
-        """
-        return self.__event
-
-#
-# eflag: FileType = Python2
-# eflag: noqa = M601, M702
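
fix_frame_filename() above maps a frame whose code object still points at a .pyc/.pyo file back to the neighbouring .py source, so breakpoints keep working when the bytecode was built elsewhere. The core of that lookup as a standalone sketch (the function name is chosen for the example):

    import os

    def source_filename(frame):
        """Return the .py file next to a frame's .pyc/.pyo, if it exists."""
        filename = frame.f_code.co_filename
        if frame.f_globals.get('__file__') == filename:
            root, ext = os.path.splitext(filename)
            if ext in ('.py', '.pyc', '.pyo') and os.path.exists(root + '.py'):
                return root + '.py'
        return filename
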
--- a/DebugClients/Python/DebugClient.py	Fri Sep 02 19:08:02 2016 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,42 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright (c) 2003 - 2016 Detlev Offenbach <detlev@die-offenbachs.de>
-#
-
-"""
-Module implementing a Qt free version of the debug client.
-"""
-
-from AsyncIO import AsyncIO
-from DebugBase import DebugBase
-import DebugClientBase
-
-
-class DebugClient(DebugClientBase.DebugClientBase, AsyncIO, DebugBase):
-    """
-    Class implementing the client side of the debugger.
-    
-    This variant of the debugger implements the standard debugger client
-    by subclassing all relevant base classes.
-    """
-    def __init__(self):
-        """
-        Constructor
-        """
-        AsyncIO.__init__(self)
-        
-        DebugClientBase.DebugClientBase.__init__(self)
-        
-        DebugBase.__init__(self, self)
-        
-        self.variant = 'Standard'
-
-# We are normally called by the debugger to execute directly.
-
-if __name__ == '__main__':
-    debugClient = DebugClient()
-    debugClient.main()
-
-#
-# eflag: FileType = Python2
-# eflag: noqa = M601, M702
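
DebugClient only glues the three base classes together; because none of them uses cooperative super(), each constructor is invoked explicitly. The pattern in miniature (the class names here are placeholders):

    class Transport(object):
        def __init__(self):
            self.connected = False

    class Session(object):
        def __init__(self):
            self.variant = 'Standard'

    class Client(Session, Transport):
        def __init__(self):
            # call every base initialiser explicitly instead of relying
            # on a single super() chain
            Transport.__init__(self)
            Session.__init__(self)
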
--- a/DebugClients/Python/DebugClientBase.py	Fri Sep 02 19:08:02 2016 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,2241 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright (c) 2002 - 2016 Detlev Offenbach <detlev@die-offenbachs.de>
-#
-
-"""
-Module implementing a debug client base class.
-"""
-
-import sys
-import socket
-import select
-import codeop
-import traceback
-import os
-import time
-import imp
-import re
-import atexit
-import signal
-import inspect
-
-
-import DebugProtocol
-import DebugClientCapabilities
-from DebugBase import setRecursionLimit, printerr   # __IGNORE_WARNING__
-from AsyncFile import AsyncFile, AsyncPendingWrite
-from DebugConfig import ConfigVarTypeStrings
-from FlexCompleter import Completer
-
-
-DebugClientInstance = None
-
-###############################################################################
-
-
-def DebugClientRawInput(prompt="", echo=1):
-    """
-    Replacement for the standard raw_input builtin.
-    
-    This function works with the split debugger.
-    
-    @param prompt prompt to be shown. (string)
-    @param echo flag indicating echoing of the input (boolean)
-    @return result of the raw_input() call
-    """
-    if DebugClientInstance is None or DebugClientInstance.redirect == 0:
-        return DebugClientOrigRawInput(prompt)
-
-    return DebugClientInstance.raw_input(prompt, echo)
-
-# Use our own raw_input().
-try:
-    DebugClientOrigRawInput = __builtins__.__dict__['raw_input']
-    __builtins__.__dict__['raw_input'] = DebugClientRawInput
-except (AttributeError, KeyError):
-    import __main__
-    DebugClientOrigRawInput = __main__.__builtins__.__dict__['raw_input']
-    __main__.__builtins__.__dict__['raw_input'] = DebugClientRawInput
-
-###############################################################################
-
-
-def DebugClientInput(prompt=""):
-    """
-    Replacement for the standard input builtin.
-    
-    This function works with the split debugger.
-    
-    @param prompt prompt to be shown (string)
-    @return result of the input() call
-    """
-    if DebugClientInstance is None or DebugClientInstance.redirect == 0:
-        return DebugClientOrigInput(prompt)
-
-    return DebugClientInstance.input(prompt)
-
-# Use our own input().
-try:
-    DebugClientOrigInput = __builtins__.__dict__['input']
-    __builtins__.__dict__['input'] = DebugClientInput
-except (AttributeError, KeyError):
-    import __main__
-    DebugClientOrigInput = __main__.__builtins__.__dict__['input']
-    __main__.__builtins__.__dict__['input'] = DebugClientInput
-
-###############################################################################
-
-
-def DebugClientFork():
-    """
-    Replacement for the standard os.fork().
-    
-    @return result of the fork() call
-    """
-    if DebugClientInstance is None:
-        return DebugClientOrigFork()
-    
-    return DebugClientInstance.fork()
-
-# use our own fork().
-if 'fork' in dir(os):
-    DebugClientOrigFork = os.fork
-    os.fork = DebugClientFork
-
-###############################################################################
-
-
-def DebugClientClose(fd):
-    """
-    Replacement for the standard os.close(fd).
-    
-    @param fd open file descriptor to be closed (integer)
-    """
-    if DebugClientInstance is None:
-        DebugClientOrigClose(fd)
-    else:
-        DebugClientInstance.close(fd)
-
-# use our own close().
-if 'close' in dir(os):
-    DebugClientOrigClose = os.close
-    os.close = DebugClientClose
-
-###############################################################################
-
-
-def DebugClientSetRecursionLimit(limit):
-    """
-    Replacement for the standard sys.setrecursionlimit(limit).
-    
-    @param limit recursion limit (integer)
-    """
-    rl = max(limit, 64)
-    setRecursionLimit(rl)
-    DebugClientOrigSetRecursionLimit(rl + 64)
-
-# use our own setrecursionlimit().
-if 'setrecursionlimit' in dir(sys):
-    DebugClientOrigSetRecursionLimit = sys.setrecursionlimit
-    sys.setrecursionlimit = DebugClientSetRecursionLimit
-    DebugClientSetRecursionLimit(sys.getrecursionlimit())
-
-###############################################################################
-
-
-class DebugClientBase(object):
-    """
-    Class implementing the client side of the debugger.
-
-    It provides access to the Python interpreter from a debugger running in
-    another process whether or not the Qt event loop is running.
-
-    The protocol between the debugger and the client assumes that there will be
-    a single source of debugger commands and a single source of Python
-    statements.  Commands and statements are always exactly one line and may be
-    interspersed.
-
-    The protocol is as follows.  First the client opens a connection to the
-    debugger and then sends a series of one line commands.  A command is either
-    &gt;Load&lt;, &gt;Step&lt;, &gt;StepInto&lt;, ... or a Python statement.
-    See DebugProtocol.py for a listing of valid protocol tokens.
-
-    A Python statement consists of the statement to execute, followed (in a
-    separate line) by &gt;OK?&lt;. If the statement was incomplete then the
-    response is &gt;Continue&lt;. If there was an exception then the response
-    is &gt;Exception&lt;. Otherwise the response is &gt;OK&lt;. The reason
-    for the &gt;OK?&lt; part is to provide a sentinel (i.e. the responding
-    &gt;OK&lt;) after any possible output as a result of executing the command.
-
-    The client may send any other lines at any other time which should be
-    interpreted as program output.
-
-    If the debugger closes the session there is no response from the client.
-    The client may close the session at any time as a result of the script
-    being debugged closing or crashing.
-    
-    <b>Note</b>: This class is meant to be subclassed by individual
-    DebugClient classes. Do not instantiate it directly.
-    """
-    clientCapabilities = DebugClientCapabilities.HasAll
-    
-    def __init__(self):
-        """
-        Constructor
-        """
-        self.breakpoints = {}
-        self.redirect = 1
-
-        # The next couple of members are needed for the threaded version.
-        # For this base class they contain static values for the non threaded
-        # debugger
-        
-        # dictionary of all threads running
-        self.threads = {}
-        
-        # the "current" thread, basically the thread we are at a
-        # breakpoint for.
-        self.currentThread = self
-        
-        # special objects representing the main scripts thread and frame
-        self.mainThread = self
-        self.mainFrame = None
-        self.framenr = 0
-        
-        # The context to run the debugged program in.
-        self.debugMod = imp.new_module('__main__')
-        self.debugMod.__dict__['__builtins__'] = __builtins__
-
-        # The list of complete lines to execute.
-        self.buffer = ''
-        
-        # The list of regexp objects to filter variables against
-        self.globalsFilterObjects = []
-        self.localsFilterObjects = []
-
-        self.pendingResponse = DebugProtocol.ResponseOK
-        self._fncache = {}
-        self.dircache = []
-        self.inRawMode = 0
-        self.mainProcStr = None     # used for the passive mode
-        self.passive = 0            # used to indicate the passive mode
-        self.running = None
-        self.test = None
-        self.tracePython = 0
-        self.debugging = 0
-        
-        self.fork_auto = False
-        self.fork_child = False
-
-        self.readstream = None
-        self.writestream = None
-        self.errorstream = None
-        self.pollingDisabled = False
-        
-        self.callTraceEnabled = False
-        self.__newCallTraceEnabled = False
-        
-        self.skipdirs = sys.path[:]
-        
-        self.variant = 'You should not see this'
-        
-        # commandline completion stuff
-        self.complete = Completer(self.debugMod.__dict__).complete
-        
-        if sys.hexversion < 0x2020000:
-            self.compile_command = codeop.compile_command
-        else:
-            self.compile_command = codeop.CommandCompiler()
-        
-        self.coding_re = re.compile(r"coding[:=]\s*([-\w_.]+)")
-        self.defaultCoding = 'utf-8'
-        self.__coding = self.defaultCoding
-        self.noencoding = False
-
-    def getCoding(self):
-        """
-        Public method to return the current coding.
-        
-        @return codec name (string)
-        """
-        return self.__coding
-        
-    def __setCoding(self, filename):
-        """
-        Private method to set the coding used by a python file.
-        
-        @param filename name of the file to inspect (string)
-        """
-        if self.noencoding:
-            self.__coding = sys.getdefaultencoding()
-        else:
-            default = 'latin-1'
-            try:
-                f = open(filename, 'rb')
-                # read the first and second line
-                text = f.readline()
-                text = "%s%s" % (text, f.readline())
-                f.close()
-            except IOError:
-                self.__coding = default
-                return
-            
-            for l in text.splitlines():
-                m = self.coding_re.search(l)
-                if m:
-                    self.__coding = m.group(1)
-                    return
-            self.__coding = default
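-        # Minimal sketch of what the regexp above picks up (the encoding
-        # name is illustrative only): a PEP 263 style cookie in one of the
-        # first two lines, e.g.
-        #
-        #     # -*- coding: iso-8859-15 -*-
-        #
-        # yields 'iso-8859-15' via m.group(1); files without such a cookie
-        # fall back to the 'latin-1' default.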
-
-    def attachThread(self, target=None, args=None, kwargs=None, mainThread=0):
-        """
-        Public method to set up a thread for DebugClient to debug.
-        
-        If mainThread is non-zero, then we are attaching to the already
-        started main thread of the app and the rest of the args are ignored.
-        
-        This is just an empty function and is overridden in the threaded
-        debugger.
-        
-        @param target the start function of the target thread (i.e. the user
-            code)
-        @param args arguments to pass to target
-        @param kwargs keyword arguments to pass to target
-        @param mainThread non-zero if we are attaching to the already
-            started main thread of the app
-        """
-        if self.debugging:
-            sys.setprofile(self.profile)
-    
-    def __dumpThreadList(self):
-        """
-        Private method to send the list of threads.
-        """
-        threadList = []
-        if self.threads and self.currentThread:
-            # indication for the threaded debugger
-            currentId = self.currentThread.get_ident()
-            for t in self.threads.values():
-                d = {}
-                d["id"] = t.get_ident()
-                d["name"] = t.get_name()
-                d["broken"] = t.isBroken()
-                threadList.append(d)
-        else:
-            currentId = -1
-            d = {}
-            d["id"] = -1
-            d["name"] = "MainThread"
-            if hasattr(self, "isBroken"):
-                d["broken"] = self.isBroken()
-            else:
-                d["broken"] = False
-            threadList.append(d)
-        
-        self.write('%s%s\n' % (DebugProtocol.ResponseThreadList,
-                               unicode((currentId, threadList))))
-    
-    def raw_input(self, prompt, echo):
-        """
-        Public method to implement raw_input() using the event loop.
-        
-        @param prompt the prompt to be shown (string)
-        @param echo Flag indicating echoing of the input (boolean)
-        @return the entered string
-        """
-        self.write("%s%s\n" % (DebugProtocol.ResponseRaw,
-                               unicode((prompt, echo))))
-        self.inRawMode = 1
-        self.eventLoop(True)
-        return self.rawLine
-
-    def input(self, prompt):
-        """
-        Public method to implement input() using the event loop.
-        
-        @param prompt the prompt to be shown (string)
-        @return the entered string evaluated as a Python expression
-        """
-        return eval(self.raw_input(prompt, 1))
-        
-    def __exceptionRaised(self):
-        """
-        Private method called in the case of an exception.
-        
-        It ensures that the debug server is informed of the raised exception.
-        """
-        self.pendingResponse = DebugProtocol.ResponseException
-    
-    def sessionClose(self, exit=1):
-        """
-        Public method to close the session with the debugger and optionally
-        terminate.
-        
-        @param exit flag indicating to terminate (boolean)
-        """
-        try:
-            self.set_quit()
-        except Exception:
-            pass
-
-        # clean up the asynchronous I/O handling.
-        self.disconnect()
-        self.debugging = 0
-        
-        # make sure we close down our end of the socket
-        # might be overkill as normally stdin, stdout and stderr
-        # SHOULD be closed on exit, but it does not hurt to do it here
-        self.readstream.close(1)
-        self.writestream.close(1)
-        self.errorstream.close(1)
-
-        if exit:
-            # Ok, go away.
-            sys.exit()
-
-    def handleLine(self, line):
-        """
-        Public method to handle the receipt of a complete line.
-
-        It first looks for a valid protocol token at the start of the line.
-        Thereafter it tries to execute the lines accumulated so far.
-        
-        @param line the received line
-        """
-        # Remove any newline.
-        if line[-1] == '\n':
-            line = line[:-1]
-
-##        printerr(line)          ##debug
-
-        eoc = line.find('<')
-
-        if eoc >= 0 and line[0] == '>':
-            # Get the command part and any argument.
-            cmd = line[:eoc + 1]
-            arg = line[eoc + 1:]
-            
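-            # Minimal sketch of the wire format dispatched below (token
-            # name and argument are illustrative only): a request line like
-            #
-            #     >Variables<(0, 1, [])
-            #
-            # yields cmd = '>Variables<' and arg = '(0, 1, [])'.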
-            if cmd == DebugProtocol.RequestVariables:
-                frmnr, scope, filter = eval(arg)
-                self.__dumpVariables(int(frmnr), int(scope), filter)
-                return
-            
-            if cmd == DebugProtocol.RequestVariable:
-                var, frmnr, scope, filter = eval(arg)
-                self.__dumpVariable(var, int(frmnr), int(scope), filter)
-                return
-            
-            if cmd == DebugProtocol.RequestThreadList:
-                self.__dumpThreadList()
-                return
-            
-            if cmd == DebugProtocol.RequestThreadSet:
-                tid = eval(arg)
-                if tid in self.threads:
-                    self.setCurrentThread(tid)
-                    self.write(DebugProtocol.ResponseThreadSet + '\n')
-                    stack = self.currentThread.getStack()
-                    self.write('%s%s\n' % (DebugProtocol.ResponseStack,
-                                           unicode(stack)))
-                return
-            
-            if cmd == DebugProtocol.RequestStep:
-                self.currentThread.step(1)
-                self.eventExit = 1
-                return
-
-            if cmd == DebugProtocol.RequestStepOver:
-                self.currentThread.step(0)
-                self.eventExit = 1
-                return
-            
-            if cmd == DebugProtocol.RequestStepOut:
-                self.currentThread.stepOut()
-                self.eventExit = 1
-                return
-            
-            if cmd == DebugProtocol.RequestStepQuit:
-                if self.passive:
-                    self.progTerminated(42)
-                else:
-                    self.set_quit()
-                    self.eventExit = 1
-                return
-
-            if cmd == DebugProtocol.RequestContinue:
-                special = int(arg)
-                self.currentThread.go(special)
-                self.eventExit = 1
-                return
-
-            if cmd == DebugProtocol.RequestOK:
-                self.write(self.pendingResponse + '\n')
-                self.pendingResponse = DebugProtocol.ResponseOK
-                return
-
-            if cmd == DebugProtocol.RequestCallTrace:
-                if arg.strip().lower() == "on":
-                    callTraceEnabled = True
-                else:
-                    callTraceEnabled = False
-                if self.debugging:
-                    self.callTraceEnabled = callTraceEnabled
-                else:
-                    self.__newCallTraceEnabled = callTraceEnabled
-                    # remember for later
-                return
-            
-            if cmd == DebugProtocol.RequestEnv:
-                env = eval(arg)
-                for key, value in env.items():
-                    if key.endswith("+"):
-                        if key[:-1] in os.environ:
-                            os.environ[key[:-1]] += value
-                        else:
-                            os.environ[key[:-1]] = value
-                    else:
-                        os.environ[key] = value
-                return
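-                # Example (key names illustrative only): with
-                #     env = {"PATH+": ":/opt/tools", "PYDEBUG": "1"}
-                # the '+' suffix appends to an existing PATH value, while
-                # PYDEBUG is simply set or overwritten.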
-
-            if cmd == DebugProtocol.RequestLoad:
-                self._fncache = {}
-                self.dircache = []
-                sys.argv = []
-                wd, fn, args, tracePython = arg.split('|')
-                fn = fn.encode(sys.getfilesystemencoding())
-                self.__setCoding(fn)
-                sys.argv.append(fn)
-                sys.argv.extend(eval(args))
-                sys.path = self.__getSysPath(os.path.dirname(sys.argv[0]))
-                if wd == '':
-                    os.chdir(sys.path[1])
-                else:
-                    os.chdir(wd)
-                tracePython = int(tracePython)
-                self.running = sys.argv[0]
-                self.mainFrame = None
-                self.inRawMode = 0
-                self.debugging = 1
-                
-                self.threads.clear()
-                self.attachThread(mainThread=1)
-                
-                # set the system exception handling function to ensure that
-                # we report on all unhandled exceptions
-                sys.excepthook = self.__unhandled_exception
-                self.__interceptSignals()
-                
-                # clear all old breakpoints; they'll get set after we
-                # have started
-                self.mainThread.clear_all_breaks()
-                
-                self.mainThread.tracePython = tracePython
-                
-                # This will eventually enter a local event loop.
-                # Note the use of repr() to quote self.running. This is
-                # needed on Windows, where backslash is the path separator;
-                # the backslashes would inadvertently get stripped away
-                # during the eval, causing IOErrors, if self.running were
-                # passed as a plain str.
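-                # For example (path illustrative only): for a script
-                #     C:\projects\app\main.py
-                # repr() yields "'C:\\projects\\app\\main.py'", keeping the
-                # backslashes escaped inside the execfile() command string.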
-                self.debugMod.__dict__['__file__'] = self.running
-                sys.modules['__main__'] = self.debugMod
-                self.callTraceEnabled = self.__newCallTraceEnabled
-                res = self.mainThread.run(
-                    'execfile(' + repr(self.running) + ')',
-                    self.debugMod.__dict__)
-                self.progTerminated(res)
-                return
-
-            if cmd == DebugProtocol.RequestRun:
-                sys.argv = []
-                wd, fn, args = arg.split('|')
-                fn = fn.encode(sys.getfilesystemencoding())
-                self.__setCoding(fn)
-                sys.argv.append(fn)
-                sys.argv.extend(eval(args))
-                sys.path = self.__getSysPath(os.path.dirname(sys.argv[0]))
-                if wd == '':
-                    os.chdir(sys.path[1])
-                else:
-                    os.chdir(wd)
-
-                self.running = sys.argv[0]
-                self.mainFrame = None
-                self.botframe = None
-                self.inRawMode = 0
-                
-                self.threads.clear()
-                self.attachThread(mainThread=1)
-                
-                # set the system exception handling function to ensure that
-                # we report on all unhandled exceptions
-                sys.excepthook = self.__unhandled_exception
-                self.__interceptSignals()
-                
-                self.mainThread.tracePython = 0
-                
-                self.debugMod.__dict__['__file__'] = sys.argv[0]
-                sys.modules['__main__'] = self.debugMod
-                res = 0
-                try:
-                    execfile(sys.argv[0], self.debugMod.__dict__)
-                except SystemExit as exc:
-                    res = exc.code
-                    atexit._run_exitfuncs()
-                self.writestream.flush()
-                self.progTerminated(res)
-                return
-
-            if cmd == DebugProtocol.RequestCoverage:
-                from coverage import coverage
-                sys.argv = []
-                wd, fn, args, erase = arg.split('@@')
-                fn = fn.encode(sys.getfilesystemencoding())
-                self.__setCoding(fn)
-                sys.argv.append(fn)
-                sys.argv.extend(eval(args))
-                sys.path = self.__getSysPath(os.path.dirname(sys.argv[0]))
-                if wd == '':
-                    os.chdir(sys.path[1])
-                else:
-                    os.chdir(wd)
-                
-                # set the system exception handling function to ensure that
-                # we report on all unhandled exceptions
-                sys.excepthook = self.__unhandled_exception
-                self.__interceptSignals()
-                
-                # generate a coverage object
-                self.cover = coverage(
-                    auto_data=True,
-                    data_file="%s.coverage" % os.path.splitext(sys.argv[0])[0])
-                
-                if int(erase):
-                    self.cover.erase()
-                sys.modules['__main__'] = self.debugMod
-                self.debugMod.__dict__['__file__'] = sys.argv[0]
-                self.running = sys.argv[0]
-                res = 0
-                self.cover.start()
-                try:
-                    execfile(sys.argv[0], self.debugMod.__dict__)
-                except SystemExit as exc:
-                    res = exc.code
-                    atexit._run_exitfuncs()
-                self.cover.stop()
-                self.cover.save()
-                self.writestream.flush()
-                self.progTerminated(res)
-                return
-            
-            if cmd == DebugProtocol.RequestProfile:
-                sys.setprofile(None)
-                import PyProfile
-                sys.argv = []
-                wd, fn, args, erase = arg.split('|')
-                fn = fn.encode(sys.getfilesystemencoding())
-                self.__setCoding(fn)
-                sys.argv.append(fn)
-                sys.argv.extend(eval(args))
-                sys.path = self.__getSysPath(os.path.dirname(sys.argv[0]))
-                if wd == '':
-                    os.chdir(sys.path[1])
-                else:
-                    os.chdir(wd)
-
-                # set the system exception handling function to ensure that
-                # we report on all unhandled exceptions
-                sys.excepthook = self.__unhandled_exception
-                self.__interceptSignals()
-                
-                # generate a profile object
-                self.prof = PyProfile.PyProfile(sys.argv[0])
-                
-                if int(erase):
-                    self.prof.erase()
-                self.debugMod.__dict__['__file__'] = sys.argv[0]
-                sys.modules['__main__'] = self.debugMod
-                self.running = sys.argv[0]
-                res = 0
-                try:
-                    self.prof.run('execfile(%r)' % sys.argv[0])
-                except SystemExit as exc:
-                    res = exc.code
-                    atexit._run_exitfuncs()
-                self.prof.save()
-                self.writestream.flush()
-                self.progTerminated(res)
-                return
-
-            if cmd == DebugProtocol.RequestShutdown:
-                self.sessionClose()
-                return
-            
-            if cmd == DebugProtocol.RequestBreak:
-                fn, line, temporary, set, cond = arg.split('@@')
-                fn = fn.encode(sys.getfilesystemencoding())
-                line = int(line)
-                set = int(set)
-                temporary = int(temporary)
-
-                if set:
-                    if cond == 'None' or cond == '':
-                        cond = None
-                    else:
-                        try:
-                            compile(cond, '<string>', 'eval')
-                        except SyntaxError:
-                            self.write(
-                                '%s%s,%d\n' %
-                                (DebugProtocol.ResponseBPConditionError,
-                                 fn, line))
-                            return
-                    self.mainThread.set_break(fn, line, temporary, cond)
-                else:
-                    self.mainThread.clear_break(fn, line)
-
-                return
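-                # Example (file name and condition illustrative only): an
-                # arg of 'main.py@@42@@0@@1@@x > 10' sets a permanent
-                # breakpoint at line 42 of main.py that only triggers when
-                # 'x > 10' evaluates to True.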
-            
-            if cmd == DebugProtocol.RequestBreakEnable:
-                fn, line, enable = arg.split(',')
-                fn = fn.encode(sys.getfilesystemencoding())
-                line = int(line)
-                enable = int(enable)
-                
-                bp = self.mainThread.get_break(fn, line)
-                if bp is not None:
-                    if enable:
-                        bp.enable()
-                    else:
-                        bp.disable()
-                    
-                return
-            
-            if cmd == DebugProtocol.RequestBreakIgnore:
-                fn, line, count = arg.split(',')
-                fn = fn.encode(sys.getfilesystemencoding())
-                line = int(line)
-                count = int(count)
-                
-                bp = self.mainThread.get_break(fn, line)
-                if bp is not None:
-                    bp.ignore = count
-                    
-                return
-            
-            if cmd == DebugProtocol.RequestWatch:
-                cond, temporary, set = arg.split('@@')
-                set = int(set)
-                temporary = int(temporary)
-
-                if set:
-                    if not cond.endswith('??created??') and \
-                       not cond.endswith('??changed??'):
-                        try:
-                            compile(cond, '<string>', 'eval')
-                        except SyntaxError:
-                            self.write('%s%s\n' % (
-                                DebugProtocol.ResponseWPConditionError, cond))
-                            return
-                    self.mainThread.set_watch(cond, temporary)
-                else:
-                    self.mainThread.clear_watch(cond)
-
-                return
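-                # Example (expressions illustrative only): a cond of
-                # 'x > 10' is syntax checked and set as a watch expression,
-                # whereas 'x ??created??' and 'x ??changed??' skip the
-                # compile check because they are special markers, not plain
-                # Python expressions.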
-            
-            if cmd == DebugProtocol.RequestWatchEnable:
-                cond, enable = arg.split(',')
-                enable = int(enable)
-                
-                bp = self.mainThread.get_watch(cond)
-                if bp is not None:
-                    if enable:
-                        bp.enable()
-                    else:
-                        bp.disable()
-                    
-                return
-            
-            if cmd == DebugProtocol.RequestWatchIgnore:
-                cond, count = arg.split(',')
-                count = int(count)
-                
-                bp = self.mainThread.get_watch(cond)
-                if bp is not None:
-                    bp.ignore = count
-                    
-                return
-            
-            if cmd == DebugProtocol.RequestEval:
-                try:
-                    value = eval(
-                        arg, self.currentThread.getCurrentFrame().f_globals,
-                        self.currentThread.getFrameLocals(self.framenr))
-                    self.currentThread.storeFrameLocals(self.framenr)
-                except Exception:
-                    # Report the exception and the traceback
-                    try:
-                        type, value, tb = sys.exc_info()
-                        sys.last_type = type
-                        sys.last_value = value
-                        sys.last_traceback = tb
-                        tblist = traceback.extract_tb(tb)
-                        del tblist[:1]
-                        list = traceback.format_list(tblist)
-                        if list:
-                            list.insert(0, "Traceback (innermost last):\n")
-                            list[len(list):] = \
-                                traceback.format_exception_only(type, value)
-                    finally:
-                        tblist = tb = None
-
-                    map(self.write, list)
-
-                    self.write(DebugProtocol.ResponseException + '\n')
-                
-                else:
-                    self.write(unicode(value) + '\n')
-                    self.write(DebugProtocol.ResponseOK + '\n')
-                
-                return
-            
-            if cmd == DebugProtocol.RequestExec:
-                _globals = self.currentThread.getCurrentFrame().f_globals
-                _locals = self.currentThread.getFrameLocals(self.framenr)
-                try:
-                    code = compile(arg + '\n', '<stdin>', 'single')
-                    exec code in _globals, _locals
-                    self.currentThread.storeFrameLocals(self.framenr)
-                except Exception:
-                    # Report the exception and the traceback
-                    try:
-                        type, value, tb = sys.exc_info()
-                        sys.last_type = type
-                        sys.last_value = value
-                        sys.last_traceback = tb
-                        tblist = traceback.extract_tb(tb)
-                        del tblist[:1]
-                        list = traceback.format_list(tblist)
-                        if list:
-                            list.insert(0, "Traceback (innermost last):\n")
-                            list[len(list):] = \
-                                traceback.format_exception_only(type, value)
-                    finally:
-                        tblist = tb = None
-
-                    map(self.write, list)
-
-                    self.write(DebugProtocol.ResponseException + '\n')
-                
-                return
-            
-            if cmd == DebugProtocol.RequestBanner:
-                self.write(
-                    '%s%s\n' % (
-                        DebugProtocol.ResponseBanner,
-                        unicode(("Python %s" % sys.version,
-                                 socket.gethostname(),
-                                 self.variant))))
-                return
-            
-            if cmd == DebugProtocol.RequestCapabilities:
-                self.write('%s%d, "Python2"\n' % (
-                    DebugProtocol.ResponseCapabilities,
-                    self.__clientCapabilities()))
-                return
-            
-            if cmd == DebugProtocol.RequestCompletion:
-                self.__completionList(arg)
-                return
-            
-            if cmd == DebugProtocol.RequestSetFilter:
-                scope, filterString = eval(arg)
-                self.__generateFilterObjects(int(scope), filterString)
-                return
-            
-            if cmd == DebugProtocol.RequestUTPrepare:
-                fn, tn, tfn, failed, cov, covname, erase = arg.split('|')
-                fn = fn.encode(sys.getfilesystemencoding())
-                sys.path.insert(0, os.path.dirname(os.path.abspath(fn)))
-                os.chdir(sys.path[0])
-                failed = eval(failed)
-
-                # set the system exception handling function to ensure that
-                # we report on all unhandled exceptions
-                sys.excepthook = self.__unhandled_exception
-                self.__interceptSignals()
-                
-                try:
-                    import unittest
-                    utModule = __import__(tn)
-                    try:
-                        if failed:
-                            self.test = unittest.defaultTestLoader\
-                                .loadTestsFromNames(failed, utModule)
-                        else:
-                            self.test = unittest.defaultTestLoader\
-                                .loadTestsFromName(tfn, utModule)
-                    except AttributeError:
-                        self.test = unittest.defaultTestLoader\
-                            .loadTestsFromModule(utModule)
-                except Exception:
-                    exc_type, exc_value, exc_tb = sys.exc_info()
-                    self.write(
-                        '%s%s\n' % (
-                            DebugProtocol.ResponseUTPrepared,
-                            unicode((0, str(exc_type), str(exc_value)))))
-                    self.__exceptionRaised()
-                    return
-                
-                # generate a coverage object
-                if int(cov):
-                    from coverage import coverage
-                    self.cover = coverage(
-                        auto_data=True,
-                        data_file="%s.coverage" % os.path.splitext(covname)[0])
-                    if int(erase):
-                        self.cover.erase()
-                else:
-                    self.cover = None
-                
-                self.write(
-                    '%s%s\n' % (
-                        DebugProtocol.ResponseUTPrepared,
-                        unicode((self.test.countTestCases(), "", ""))))
-                return
-            
-            if cmd == DebugProtocol.RequestUTRun:
-                from DCTestResult import DCTestResult
-                self.testResult = DCTestResult(self)
-                if self.cover:
-                    self.cover.start()
-                self.test.run(self.testResult)
-                if self.cover:
-                    self.cover.stop()
-                    self.cover.save()
-                self.write('%s\n' % DebugProtocol.ResponseUTFinished)
-                return
-            
-            if cmd == DebugProtocol.RequestUTStop:
-                self.testResult.stop()
-                return
-            
-            if cmd == DebugProtocol.ResponseForkTo:
-                # this results from a separate event loop
-                self.fork_child = (arg == 'child')
-                self.eventExit = 1
-                return
-            
-            if cmd == DebugProtocol.RequestForkMode:
-                self.fork_auto, self.fork_child = eval(arg)
-                return
-        
-        # If we are handling raw mode input then reset the mode and break out
-        # of the current event loop.
-        if self.inRawMode:
-            self.inRawMode = 0
-            self.rawLine = line
-            self.eventExit = 1
-            return
-
-        if self.buffer:
-            self.buffer = self.buffer + '\n' + line
-        else:
-            self.buffer = line
-
-        try:
-            code = self.compile_command(self.buffer, self.readstream.name)
-        except (OverflowError, SyntaxError, ValueError):
-            # Report the exception
-            sys.last_type, sys.last_value, sys.last_traceback = sys.exc_info()
-            map(self.write, traceback.format_exception_only(
-                sys.last_type, sys.last_value))
-            self.buffer = ''
-        else:
-            if code is None:
-                self.pendingResponse = DebugProtocol.ResponseContinue
-            else:
-                self.buffer = ''
-
-                try:
-                    if self.running is None:
-                        exec code in self.debugMod.__dict__
-                    else:
-                        if self.currentThread is None:
-                            # program has terminated
-                            self.running = None
-                            _globals = self.debugMod.__dict__
-                            _locals = _globals
-                        else:
-                            cf = self.currentThread.getCurrentFrame()
-                            # program has terminated
-                            if cf is None:
-                                self.running = None
-                                _globals = self.debugMod.__dict__
-                                _locals = _globals
-                            else:
-                                frmnr = self.framenr
-                                while cf is not None and frmnr > 0:
-                                    cf = cf.f_back
-                                    frmnr -= 1
-                                _globals = cf.f_globals
-                                _locals = \
-                                    self.currentThread.getFrameLocals(
-                                        self.framenr)
-                        # temporarily redirect sys.stdout to our redirector
-                        # while executing the code and restore it afterwards
-                        if "sys" in _globals:
-                            __stdout = _globals["sys"].stdout
-                            _globals["sys"].stdout = self.writestream
-                            exec code in _globals, _locals
-                            _globals["sys"].stdout = __stdout
-                        elif "sys" in _locals:
-                            __stdout = _locals["sys"].stdout
-                            _locals["sys"].stdout = self.writestream
-                            exec code in _globals, _locals
-                            _locals["sys"].stdout = __stdout
-                        else:
-                            exec code in _globals, _locals
-                        
-                        self.currentThread.storeFrameLocals(self.framenr)
-                except SystemExit as exc:
-                    self.progTerminated(exc.code)
-                except Exception:
-                    # Report the exception and the traceback
-                    try:
-                        type, value, tb = sys.exc_info()
-                        sys.last_type = type
-                        sys.last_value = value
-                        sys.last_traceback = tb
-                        tblist = traceback.extract_tb(tb)
-                        del tblist[:1]
-                        list = traceback.format_list(tblist)
-                        if list:
-                            list.insert(0, "Traceback (innermost last):\n")
-                            list[len(list):] = \
-                                traceback.format_exception_only(type, value)
-                    finally:
-                        tblist = tb = None
-
-                    map(self.write, list)
-
-    def __clientCapabilities(self):
-        """
-        Private method to determine the client's capabilities.
-        
-        @return client capabilities (integer)
-        """
-        try:
-            import PyProfile    # __IGNORE_WARNING__
-            try:
-                del sys.modules['PyProfile']
-            except KeyError:
-                pass
-            return self.clientCapabilities
-        except ImportError:
-            return (
-                self.clientCapabilities & ~DebugClientCapabilities.HasProfiler)
-        
-    def write(self, s):
-        """
-        Public method to write data to the output stream.
-        
-        @param s data to be written (string)
-        """
-        self.writestream.write(s)
-        self.writestream.flush()
-
-    def __interact(self):
-        """
-        Private method to interact with the debugger.
-        """
-        global DebugClientInstance
-
-        self.setDescriptors(self.readstream, self.writestream)
-        DebugClientInstance = self
-
-        if not self.passive:
-            # At this point simulate an event loop.
-            self.eventLoop()
-
-    def eventLoop(self, disablePolling=False):
-        """
-        Public method implementing our event loop.
-        
-        @param disablePolling flag indicating to enter an event loop with
-            polling disabled (boolean)
-        """
-        self.eventExit = None
-        self.pollingDisabled = disablePolling
-
-        while self.eventExit is None:
-            wrdy = []
-
-            if self.writestream.nWriteErrors > self.writestream.maxtries:
-                break
-            
-            if AsyncPendingWrite(self.writestream):
-                wrdy.append(self.writestream)
-
-            if AsyncPendingWrite(self.errorstream):
-                wrdy.append(self.errorstream)
-            
-            try:
-                rrdy, wrdy, xrdy = select.select([self.readstream], wrdy, [])
-            except (select.error, KeyboardInterrupt, socket.error):
-                # just carry on
-                continue
-
-            if self.readstream in rrdy:
-                self.readReady(self.readstream.fileno())
-
-            if self.writestream in wrdy:
-                self.writeReady(self.writestream.fileno())
-
-            if self.errorstream in wrdy:
-                self.writeReady(self.errorstream.fileno())
-
-        self.eventExit = None
-        self.pollingDisabled = False
-
-    def eventPoll(self):
-        """
-        Public method to poll for events like 'set break point'.
-        """
-        if self.pollingDisabled:
-            return
-        
-        # the choice of a ~0.5 second poll interval is arbitrary.
-        lasteventpolltime = getattr(self, 'lasteventpolltime', time.time())
-        now = time.time()
-        if now - lasteventpolltime < 0.5:
-            self.lasteventpolltime = lasteventpolltime
-            return
-        else:
-            self.lasteventpolltime = now
-
-        wrdy = []
-        if AsyncPendingWrite(self.writestream):
-            wrdy.append(self.writestream)
-
-        if AsyncPendingWrite(self.errorstream):
-            wrdy.append(self.errorstream)
-        
-        # immediate return if nothing is ready.
-        try:
-            rrdy, wrdy, xrdy = select.select([self.readstream], wrdy, [], 0)
-        except (select.error, KeyboardInterrupt, socket.error):
-            return
-
-        if self.readstream in rrdy:
-            self.readReady(self.readstream.fileno())
-
-        if self.writestream in wrdy:
-            self.writeReady(self.writestream.fileno())
-
-        if self.errorstream in wrdy:
-            self.writeReady(self.errorstream.fileno())
-        
-    def connectDebugger(self, port, remoteAddress=None, redirect=1):
-        """
-        Public method to establish a session with the debugger.
-        
-        It opens a network connection to the debugger, connects it to stdin,
-        stdout and stderr and saves these file objects in case the application
-        being debugged redirects them itself.
-        
-        @param port the port number to connect to (int)
-        @param remoteAddress the network address of the debug server host
-            (string)
-        @param redirect flag indicating redirection of stdin, stdout and
-            stderr (boolean)
-        """
-        if remoteAddress is None:
-            remoteAddress = "127.0.0.1"
-        elif "@@i" in remoteAddress:
-            remoteAddress = remoteAddress.split("@@i")[0]
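-            # e.g. (address illustrative only) a remoteAddress of
-            # '192.168.1.25@@i6' is reduced to '192.168.1.25' before the
-            # connection is established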
-        sock = socket.create_connection((remoteAddress, port))
-
-        self.readstream = AsyncFile(sock, sys.stdin.mode, sys.stdin.name)
-        self.writestream = AsyncFile(sock, sys.stdout.mode, sys.stdout.name)
-        self.errorstream = AsyncFile(sock, sys.stderr.mode, sys.stderr.name)
-        
-        if redirect:
-            sys.stdin = self.readstream
-            sys.stdout = self.writestream
-            sys.stderr = self.errorstream
-        self.redirect = redirect
-        
-        # attach to the main thread here
-        self.attachThread(mainThread=1)
-
-    def __unhandled_exception(self, exctype, excval, exctb):
-        """
-        Private method called to report an uncaught exception.
-        
-        @param exctype the type of the exception
-        @param excval data about the exception
-        @param exctb traceback for the exception
-        """
-        self.mainThread.user_exception(None, (exctype, excval, exctb), 1)
-    
-    def __interceptSignals(self):
-        """
-        Private method to intercept common signals.
-        """
-        for signum in [
-            signal.SIGABRT,                 # abnormal termination
-            signal.SIGFPE,                  # floating point exception
-            signal.SIGILL,                  # illegal instruction
-            signal.SIGSEGV,                 # segmentation violation
-        ]:
-            signal.signal(signum, self.__signalHandler)
-    
-    def __signalHandler(self, signalNumber, stackFrame):
-        """
-        Private method to handle signals.
-        
-        @param signalNumber number of the signal to be handled
-        @type int
-        @param stackFrame current stack frame
-        @type frame object
-        """
-        if signalNumber == signal.SIGABRT:
-            message = "Abnormal Termination"
-        elif signalNumber == signal.SIGFPE:
-            message = "Floating Point Exception"
-        elif signalNumber == signal.SIGILL:
-            message = "Illegal Instruction"
-        elif signalNumber == signal.SIGSEGV:
-            message = "Segmentation Violation"
-        else:
-            message = "Unknown Signal '%d'" % signalNumber
-        
-        filename = self.absPath(stackFrame.f_code.co_filename)
-        
-        linenr = stackFrame.f_lineno
-        ffunc = stackFrame.f_code.co_name
-        
-        if ffunc == '?':
-            ffunc = ''
-        
-        if ffunc and not ffunc.startswith("<"):
-            argInfo = inspect.getargvalues(stackFrame)
-            try:
-                fargs = inspect.formatargvalues(
-                    argInfo.args, argInfo.varargs,
-                    argInfo.keywords, argInfo.locals)
-            except Exception:
-                fargs = ""
-        else:
-            fargs = ""
-        
-        siglist = [message, [filename, linenr, ffunc, fargs]]
-        
-        self.write("%s%s" % (DebugProtocol.ResponseSignal, str(siglist)))
-        
-    def absPath(self, fn):
-        """
-        Public method to convert a filename to an absolute name.
-
-        sys.path is used as a set of possible prefixes. The name stays
-        relative if a file could not be found.
-        
-        @param fn filename (string)
-        @return the converted filename (string)
-        """
-        if os.path.isabs(fn):
-            return fn
-
-        # Check the cache.
-        if fn in self._fncache:
-            return self._fncache[fn]
-
-        # Search sys.path.
-        for p in sys.path:
-            afn = os.path.abspath(os.path.join(p, fn))
-            nafn = os.path.normcase(afn)
-
-            if os.path.exists(nafn):
-                self._fncache[fn] = afn
-                d = os.path.dirname(afn)
-                if (d not in sys.path) and (d not in self.dircache):
-                    self.dircache.append(d)
-                return afn
-
-        # Search the additional directory cache
-        for p in self.dircache:
-            afn = os.path.abspath(os.path.join(p, fn))
-            nafn = os.path.normcase(afn)
-            
-            if os.path.exists(nafn):
-                self._fncache[fn] = afn
-                return afn
-                
-        # Nothing found.
-        return fn
-
-    def shouldSkip(self, fn):
-        """
-        Public method to check if a file should be skipped.
-        
-        @param fn filename to be checked
-        @return non-zero if fn represents a file we are 'skipping',
-            zero otherwise.
-        """
-        if self.mainThread.tracePython:     # trace into Python library
-            return 0
-            
-        # Eliminate anything that is part of the Python installation.
-        afn = self.absPath(fn)
-        for d in self.skipdirs:
-            if afn.startswith(d):
-                return 1
-        
-        # special treatment for paths containing site-packages or dist-packages
-        for part in ["site-packages", "dist-packages"]:
-            if part in afn:
-                return 1
-        
-        return 0
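-        # e.g. (paths illustrative only) files below /usr/lib/python2.7 or
-        # inside any site-packages / dist-packages directory are reported
-        # as skipped unless tracePython is set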
-        
-    def getRunning(self):
-        """
-        Public method to return the main script we are currently running.
-        
-        @return name of the script being debugged or None (string)
-        """
-        return self.running
-
-    def progTerminated(self, status):
-        """
-        Public method to tell the debugger that the program has terminated.
-        
-        @param status return status
-        @type int
-        """
-        if status is None:
-            status = 0
-        else:
-            try:
-                status = int(status)
-            except ValueError:
-                status = 1
-
-        if self.running:
-            self.set_quit()
-            self.running = None
-            self.write('%s%d\n' % (DebugProtocol.ResponseExit, status))
-        
-        # reset coding
-        self.__coding = self.defaultCoding
-
-    def __dumpVariables(self, frmnr, scope, filter):
-        """
-        Private method to return the variables of a frame to the debug server.
-        
-        @param frmnr distance of frame reported on. 0 is the current frame
-            (int)
-        @param scope 1 to report global variables, 0 for local variables (int)
-        @param filter the indices of variable types to be filtered (list of
-            int)
-        """
-        if self.currentThread is None:
-            return
-        
-        if scope == 0:
-            self.framenr = frmnr
-        
-        f = self.currentThread.getCurrentFrame()
-        
-        while f is not None and frmnr > 0:
-            f = f.f_back
-            frmnr -= 1
-        
-        if f is None:
-            if scope:
-                dict = self.debugMod.__dict__
-            else:
-                scope = -1
-        elif scope:
-            dict = f.f_globals
-        elif f.f_globals is f.f_locals:
-            scope = -1
-        else:
-            dict = f.f_locals
-            
-        varlist = [scope]
-        
-        if scope != -1:
-            keylist = dict.keys()
-            
-            vlist = self.__formatVariablesList(keylist, dict, scope, filter)
-            varlist.extend(vlist)
-            
-        self.write('%s%s\n' % (
-            DebugProtocol.ResponseVariables, unicode(varlist)))
-    
-    def __dumpVariable(self, var, frmnr, scope, filter):
-        """
-        Private method to return one variable of a frame to the debug
-        server.
-        
-        @param var list encoded name of the requested variable
-            (list of strings)
-        @param frmnr distance of frame reported on. 0 is the current frame
-            (int)
-        @param scope 1 to report global variables, 0 for local variables (int)
-        @param filter the indices of variable types to be filtered
-            (list of int)
-        """
-        if self.currentThread is None:
-            return
-        
-        f = self.currentThread.getCurrentFrame()
-        
-        while f is not None and frmnr > 0:
-            f = f.f_back
-            frmnr -= 1
-        
-        if f is None:
-            if scope:
-                dict = self.debugMod.__dict__
-            else:
-                scope = -1
-        elif scope:
-            dict = f.f_globals
-        elif f.f_globals is f.f_locals:
-            scope = -1
-        else:
-            dict = f.f_locals
-        
-        varlist = [scope, var]
-        
-        if scope != -1:
-            # search the correct dictionary
-            i = 0
-            rvar = var[:]
-            dictkeys = None
-            obj = None
-            isDict = 0
-            formatSequences = 0
-            access = ""
-            oaccess = ""
-            odict = dict
-            
-            qtVariable = False
-            qvar = None
-            qvtype = ""
-            
-            while i < len(var):
-                if len(dict):
-                    udict = dict
-                ndict = {}
-                # this has to be in line with VariablesViewer.indicators
-                if var[i][-2:] in ["[]", "()", "{}"]:   # __IGNORE_WARNING__
-                    if i + 1 == len(var):
-                        if var[i][:-2] == '...':
-                            dictkeys = [var[i - 1]]
-                        else:
-                            dictkeys = [var[i][:-2]]
-                        formatSequences = 1
-                        if not access and not oaccess:
-                            if var[i][:-2] == '...':
-                                access = '["%s"]' % var[i - 1]
-                                dict = odict
-                            else:
-                                access = '["%s"]' % var[i][:-2]
-                        else:
-                            if var[i][:-2] == '...':
-                                if oaccess:
-                                    access = oaccess
-                                else:
-                                    access = '%s[%s]' % (access, var[i - 1])
-                                dict = odict
-                            else:
-                                if oaccess:
-                                    access = '%s[%s]' % (oaccess, var[i][:-2])
-                                    oaccess = ''
-                                else:
-                                    access = '%s[%s]' % (access, var[i][:-2])
-                        if var[i][-2:] == "{}":         # __IGNORE_WARNING__
-                            isDict = 1
-                        break
-                    else:
-                        if not access:
-                            if var[i][:-2] == '...':
-                                access = '["%s"]' % var[i - 1]
-                                dict = odict
-                            else:
-                                access = '["%s"]' % var[i][:-2]
-                        else:
-                            if var[i][:-2] == '...':
-                                access = '%s[%s]' % (access, var[i - 1])
-                                dict = odict
-                            else:
-                                if oaccess:
-                                    access = '%s[%s]' % (oaccess, var[i][:-2])
-                                    oaccess = ''
-                                else:
-                                    access = '%s[%s]' % (access, var[i][:-2])
-                else:
-                    if access:
-                        if oaccess:
-                            access = '%s[%s]' % (oaccess, var[i])
-                        else:
-                            access = '%s[%s]' % (access, var[i])
-                        if var[i - 1][:-2] == '...':
-                            oaccess = access
-                        else:
-                            oaccess = ''
-                        try:
-                            exec 'mdict = dict%s.__dict__' % access
-                            ndict.update(mdict)     # __IGNORE_WARNING__
-                            exec 'obj = dict%s' % access
-                            if "PyQt4." in str(type(obj)) or \
-                                    "PyQt5." in str(type(obj)):
-                                qtVariable = True
-                                qvar = obj
-                                qvtype = ("%s" % type(qvar))[1:-1]\
-                                    .split()[1][1:-1]
-                        except Exception:
-                            pass
-                        try:
-                            exec 'mcdict = dict%s.__class__.__dict__' % access
-                            ndict.update(mcdict)     # __IGNORE_WARNING__
-                            if mdict and "sipThis" not in mdict.keys():  # __IGNORE_WARNING__
-                                del rvar[0:2]
-                                access = ""
-                        except Exception:
-                            pass
-                        try:
-                            cdict = {}
-                            exec 'slv = dict%s.__slots__' % access
-                            for v in slv:   # __IGNORE_WARNING__
-                                try:
-                                    exec 'cdict[v] = dict%s.%s' % (access, v)
-                                except Exception:
-                                    pass
-                            ndict.update(cdict)
-                            exec 'obj = dict%s' % access
-                            access = ""
-                            if "PyQt4." in str(type(obj)) or \
-                                    "PyQt5." in str(type(obj)):
-                                qtVariable = True
-                                qvar = obj
-                                qvtype = ("%s" % type(qvar))[1:-1]\
-                                    .split()[1][1:-1]
-                        except Exception:
-                            pass
-                    else:
-                        try:
-                            ndict.update(dict[var[i]].__dict__)
-                            ndict.update(dict[var[i]].__class__.__dict__)
-                            del rvar[0]
-                            obj = dict[var[i]]
-                            if "PyQt4." in str(type(obj)) or \
-                                    "PyQt5." in str(type(obj)):
-                                qtVariable = True
-                                qvar = obj
-                                qvtype = ("%s" % type(qvar))[1:-1]\
-                                    .split()[1][1:-1]
-                        except Exception:
-                            pass
-                        try:
-                            cdict = {}
-                            slv = dict[var[i]].__slots__
-                            for v in slv:
-                                try:
-                                    exec 'cdict[v] = dict[var[i]].%s' % v
-                                except Exception:
-                                    pass
-                            ndict.update(cdict)
-                            obj = dict[var[i]]
-                            if "PyQt4." in str(type(obj)) or \
-                                    "PyQt5." in str(type(obj)):
-                                qtVariable = True
-                                qvar = obj
-                                qvtype = ("%s" % type(qvar))[1:-1]\
-                                    .split()[1][1:-1]
-                        except Exception:
-                            pass
-                    odict = dict
-                    dict = ndict
-                i += 1
-            
-            if qtVariable:
-                vlist = self.__formatQtVariable(qvar, qvtype)
-            elif ("sipThis" in dict.keys() and len(dict) == 1) or \
-                    (len(dict) == 0 and len(udict) > 0):
-                if access:
-                    exec 'qvar = udict%s' % access
-                # this has to be in line with VariablesViewer.indicators
-                elif rvar and rvar[0][-2:] in ["[]", "()", "{}"]:   # __IGNORE_WARNING__
-                    exec 'qvar = udict["%s"][%s]' % (rvar[0][:-2], rvar[1])
-                else:
-                    qvar = udict[var[-1]]
-                qvtype = ("%s" % type(qvar))[1:-1].split()[1][1:-1]
-                if qvtype.startswith(("PyQt4", "PyQt5")):
-                    vlist = self.__formatQtVariable(qvar, qvtype)
-                else:
-                    vlist = []
-            else:
-                qtVariable = False
-                if len(dict) == 0 and len(udict) > 0:
-                    if access:
-                        exec 'qvar = udict%s' % access
-                    # this has to be in line with VariablesViewer.indicators
-                    elif rvar and rvar[0][-2:] in ["[]", "()", "{}"]:   # __IGNORE_WARNING__
-                        exec 'qvar = udict["%s"][%s]' % (rvar[0][:-2], rvar[1])
-                    else:
-                        qvar = udict[var[-1]]
-                    qvtype = ("%s" % type(qvar))[1:-1].split()[1][1:-1]
-                    if qvtype.startswith(("PyQt4", "PyQt5")):
-                        qtVariable = True
-                
-                if qtVariable:
-                    vlist = self.__formatQtVariable(qvar, qvtype)
-                else:
-                    # format the dictionary found
-                    if dictkeys is None:
-                        dictkeys = dict.keys()
-                    else:
-                        # treatment for sequences and dictionaries
-                        if access:
-                            exec "dict = dict%s" % access
-                        else:
-                            dict = dict[dictkeys[0]]
-                        if isDict:
-                            dictkeys = dict.keys()
-                        else:
-                            dictkeys = range(len(dict))
-                    vlist = self.__formatVariablesList(
-                        dictkeys, dict, scope, filter, formatSequences)
-            varlist.extend(vlist)
-        
-            if obj is not None and not formatSequences:
-                try:
-                    if unicode(repr(obj)).startswith('{'):
-                        varlist.append(('...', 'dict', "%d" % len(obj.keys())))
-                    elif unicode(repr(obj)).startswith('['):
-                        varlist.append(('...', 'list', "%d" % len(obj)))
-                    elif unicode(repr(obj)).startswith('('):
-                        varlist.append(('...', 'tuple', "%d" % len(obj)))
-                except Exception:
-                    pass
-        
-        self.write('%s%s\n' % (
-            DebugProtocol.ResponseVariable, unicode(varlist)))
-        
-    def __formatQtVariable(self, value, vtype):
-        """
-        Private method to produce a formatted output of a simple Qt4/Qt5
-        type.
-        
-        @param value variable to be formatted
-        @param vtype type of the variable to be formatted (string)
-        @return list of formatted variables. Each variable entry is a
-            tuple of three elements: the variable name, its type and its
-            value.
-        """
-        qttype = vtype.split('.')[-1]
-        varlist = []
-        if qttype == 'QChar':
-            varlist.append(("", "QChar", "%s" % unichr(value.unicode())))
-            varlist.append(("", "int", "%d" % value.unicode()))
-        elif qttype == 'QByteArray':
-            varlist.append(("hex", "QByteArray", "%s" % value.toHex()))
-            varlist.append(("base64", "QByteArray", "%s" % value.toBase64()))
-            varlist.append(("percent encoding", "QByteArray",
-                            "%s" % value.toPercentEncoding()))
-        elif qttype == 'QString':
-            varlist.append(("", "QString", "%s" % value))
-        elif qttype == 'QStringList':
-            for i in range(value.count()):
-                varlist.append(("%d" % i, "QString", "%s" % value[i]))
-        elif qttype == 'QPoint':
-            varlist.append(("x", "int", "%d" % value.x()))
-            varlist.append(("y", "int", "%d" % value.y()))
-        elif qttype == 'QPointF':
-            varlist.append(("x", "float", "%g" % value.x()))
-            varlist.append(("y", "float", "%g" % value.y()))
-        elif qttype == 'QRect':
-            varlist.append(("x", "int", "%d" % value.x()))
-            varlist.append(("y", "int", "%d" % value.y()))
-            varlist.append(("width", "int", "%d" % value.width()))
-            varlist.append(("height", "int", "%d" % value.height()))
-        elif qttype == 'QRectF':
-            varlist.append(("x", "float", "%g" % value.x()))
-            varlist.append(("y", "float", "%g" % value.y()))
-            varlist.append(("width", "float", "%g" % value.width()))
-            varlist.append(("height", "float", "%g" % value.height()))
-        elif qttype == 'QSize':
-            varlist.append(("width", "int", "%d" % value.width()))
-            varlist.append(("height", "int", "%d" % value.height()))
-        elif qttype == 'QSizeF':
-            varlist.append(("width", "float", "%g" % value.width()))
-            varlist.append(("height", "float", "%g" % value.height()))
-        elif qttype == 'QColor':
-            varlist.append(("name", "str", "%s" % value.name()))
-            r, g, b, a = value.getRgb()
-            varlist.append(("rgba", "int", "%d, %d, %d, %d" % (r, g, b, a)))
-            h, s, v, a = value.getHsv()
-            varlist.append(("hsva", "int", "%d, %d, %d, %d" % (h, s, v, a)))
-            c, m, y, k, a = value.getCmyk()
-            varlist.append(
-                ("cmyka", "int", "%d, %d, %d, %d, %d" % (c, m, y, k, a)))
-        elif qttype == 'QDate':
-            varlist.append(("", "QDate", "%s" % value.toString()))
-        elif qttype == 'QTime':
-            varlist.append(("", "QTime", "%s" % value.toString()))
-        elif qttype == 'QDateTime':
-            varlist.append(("", "QDateTime", "%s" % value.toString()))
-        elif qttype == 'QDir':
-            varlist.append(("path", "str", "%s" % value.path()))
-            varlist.append(
-                ("absolutePath", "str", "%s" % value.absolutePath()))
-            varlist.append(
-                ("canonicalPath", "str", "%s" % value.canonicalPath()))
-        elif qttype == 'QFile':
-            varlist.append(("fileName", "str", "%s" % value.fileName()))
-        elif qttype == 'QFont':
-            varlist.append(("family", "str", "%s" % value.family()))
-            varlist.append(("pointSize", "int", "%d" % value.pointSize()))
-            varlist.append(("weight", "int", "%d" % value.weight()))
-            varlist.append(("bold", "bool", "%s" % value.bold()))
-            varlist.append(("italic", "bool", "%s" % value.italic()))
-        elif qttype == 'QUrl':
-            varlist.append(("url", "str", "%s" % value.toString()))
-            varlist.append(("scheme", "str", "%s" % value.scheme()))
-            varlist.append(("user", "str", "%s" % value.userName()))
-            varlist.append(("password", "str", "%s" % value.password()))
-            varlist.append(("host", "str", "%s" % value.host()))
-            varlist.append(("port", "int", "%d" % value.port()))
-            varlist.append(("path", "str", "%s" % value.path()))
-        elif qttype == 'QModelIndex':
-            varlist.append(("valid", "bool", "%s" % value.isValid()))
-            if value.isValid():
-                varlist.append(("row", "int", "%s" % value.row()))
-                varlist.append(("column", "int", "%s" % value.column()))
-                varlist.append(
-                    ("internalId", "int", "%s" % value.internalId()))
-                varlist.append(
-                    ("internalPointer", "void *", "%s" %
-                     value.internalPointer()))
-        elif qttype == 'QRegExp':
-            varlist.append(("pattern", "str", "%s" % value.pattern()))
-        
-        # GUI stuff
-        elif qttype == 'QAction':
-            varlist.append(("name", "str", "%s" % value.objectName()))
-            varlist.append(("text", "str", "%s" % value.text()))
-            varlist.append(("icon text", "str", "%s" % value.iconText()))
-            varlist.append(("tooltip", "str", "%s" % value.toolTip()))
-            varlist.append(("whatsthis", "str", "%s" % value.whatsThis()))
-            varlist.append(
-                ("shortcut", "str", "%s" % value.shortcut().toString()))
-        elif qttype == 'QKeySequence':
-            varlist.append(("value", "", "%s" % value.toString()))
-            
-        # XML stuff
-        elif qttype == 'QDomAttr':
-            varlist.append(("name", "str", "%s" % value.name()))
-            varlist.append(("value", "str", "%s" % value.value()))
-        elif qttype == 'QDomCharacterData':
-            varlist.append(("data", "str", "%s" % value.data()))
-        elif qttype == 'QDomComment':
-            varlist.append(("data", "str", "%s" % value.data()))
-        elif qttype == "QDomDocument":
-            varlist.append(("text", "str", "%s" % value.toString()))
-        elif qttype == 'QDomElement':
-            varlist.append(("tagName", "str", "%s" % value.tagName()))
-            varlist.append(("text", "str", "%s" % value.text()))
-        elif qttype == 'QDomText':
-            varlist.append(("data", "str", "%s" % value.data()))
-            
-        # Networking stuff
-        elif qttype == 'QHostAddress':
-            varlist.append(
-                ("address", "QHostAddress", "%s" % value.toString()))
-            
-        return varlist
-        
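To illustrate the shape of the data this method produces, here is a minimal sketch of the QPoint branch above; FakeQPoint is an invented stand-in used only so the snippet runs without PyQt installed.

    class FakeQPoint(object):
        # Stand-in for QtCore.QPoint; only x() and y() are needed here.
        def __init__(self, x, y):
            self._x, self._y = x, y

        def x(self):
            return self._x

        def y(self):
            return self._y

    value = FakeQPoint(3, 4)
    # Mirrors the QPoint branch: each entry is (name, type, value-as-string).
    varlist = [("x", "int", "%d" % value.x()),
               ("y", "int", "%d" % value.y())]
    assert varlist == [("x", "int", "3"), ("y", "int", "4")]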
-    def __formatVariablesList(self, keylist, dict, scope, filter=[],
-                              formatSequences=0):
-        """
-        Private method to produce a formatted variables list.
-        
-        The dictionary passed in is scanned. Variables are only added to
-        the list if their type is not contained in the filter list and
-        their name does not match any of the filter expressions. The
-        formatted variables list (a list of tuples of 3 values) is
-        returned.
-        
-        @param keylist keys of the dictionary
-        @param dict the dictionary to be scanned
-        @param scope 1 to filter using the globals filter, 0 using the locals
-            filter (int).
-            Variables are only added to the list if their name does not
-            match any of the filter expressions.
-        @param filter the indices of variable types to be filtered. Variables
-            are only added to the list if their type is not contained in the
-            filter list.
-        @param formatSequences flag indicating that sequence or dictionary
-            variables should be formatted. If it is 0 (or false), just the
-            number of items contained in these variables is returned. (boolean)
-        @return list of formatted variables. Each variable entry is a tuple
-            of three elements: the variable name, its type and value.
-        """
-        varlist = []
-        if scope:
-            patternFilterObjects = self.globalsFilterObjects
-        else:
-            patternFilterObjects = self.localsFilterObjects
-        
-        for key in keylist:
-            # filter based on the filter pattern
-            matched = 0
-            for pat in patternFilterObjects:
-                if pat.match(unicode(key)):
-                    matched = 1
-                    break
-            if matched:
-                continue
-            
-            # filter hidden attributes (filter #0)
-            if 0 in filter and unicode(key)[:2] == '__':
-                continue
-            
-            # special handling for '__builtins__' (it's way too big)
-            if key == '__builtins__':
-                rvalue = '<module __builtin__ (built-in)>'
-                valtype = 'module'
-            else:
-                value = dict[key]
-                valtypestr = ("%s" % type(value))[1:-1]
-                    
-                if valtypestr.split(' ', 1)[0] == 'class':
-                    # handle new class type of python 2.2+
-                    if ConfigVarTypeStrings.index('instance') in filter:
-                        continue
-                    valtype = valtypestr
-                else:
-                    valtype = valtypestr[6:-1]
-                    try:
-                        if ConfigVarTypeStrings.index(valtype) in filter:
-                            continue
-                    except ValueError:
-                        if valtype == "classobj":
-                            if ConfigVarTypeStrings.index(
-                                    'instance') in filter:
-                                continue
-                        elif valtype == "sip.methoddescriptor":
-                            if ConfigVarTypeStrings.index(
-                                    'instance method') in filter:
-                                continue
-                        elif valtype == "sip.enumtype":
-                            if ConfigVarTypeStrings.index('class') in filter:
-                                continue
-                        elif not valtype.startswith("PySide") and \
-                                ConfigVarTypeStrings.index('other') in filter:
-                            continue
-                    
-                try:
-                    if valtype not in ['list', 'tuple', 'dict']:
-                        rvalue = repr(value)
-                        if valtype.startswith('class') and \
-                           rvalue[0] in ['{', '(', '[']:
-                            rvalue = ""
-                    else:
-                        if valtype == 'dict':
-                            rvalue = "%d" % len(value.keys())
-                        else:
-                            rvalue = "%d" % len(value)
-                except Exception:
-                    rvalue = ''
-                
-            if formatSequences:
-                if unicode(key) == key:
-                    key = "'%s'" % key
-                else:
-                    key = unicode(key)
-            varlist.append((key, valtype, rvalue))
-        
-        return varlist
-        
-    def __generateFilterObjects(self, scope, filterString):
-        """
-        Private slot to convert a filter string to a list of filter objects.
-        
-        @param scope 1 to generate filter for global variables, 0 for local
-            variables (int)
-        @param filterString string of filter patterns separated by ';'
-        """
-        patternFilterObjects = []
-        for pattern in filterString.split(';'):
-            patternFilterObjects.append(re.compile('^%s$' % pattern))
-        if scope:
-            self.globalsFilterObjects = patternFilterObjects[:]
-        else:
-            self.localsFilterObjects = patternFilterObjects[:]
-        
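A small sketch of how such a filter string takes effect, assuming the same '^pattern$' anchoring used above; the filter patterns and variable names are invented for the example.

    import re

    filterString = "__.*;.*Internal"     # hypothetical patterns, ';' separated
    patternFilterObjects = [re.compile('^%s$' % pattern)
                            for pattern in filterString.split(';')]

    names = ["__doc__", "counter", "cacheInternal"]
    # A name is skipped as soon as any pattern matches it completely.
    visible = [name for name in names
               if not any(pat.match(name) for pat in patternFilterObjects)]
    assert visible == ["counter"]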
-    def __completionList(self, text):
-        """
-        Private slot to handle the request for a commandline completion list.
-        
-        @param text the text to be completed (string)
-        """
-        completerDelims = ' \t\n`~!@#$%^&*()-=+[{]}\\|;:\'",<>/?'
-        
-        completions = set()
-        # find position of last delim character
-        pos = -1
-        while pos >= -len(text):
-            if text[pos] in completerDelims:
-                if pos == -1:
-                    text = ''
-                else:
-                    text = text[pos + 1:]
-                break
-            pos -= 1
-        
-        # Get local and global completions
-        try:
-            localdict = self.currentThread.getFrameLocals(self.framenr)
-            localCompleter = Completer(localdict).complete
-            self.__getCompletionList(text, localCompleter, completions)
-        except AttributeError:
-            pass
-        self.__getCompletionList(text, self.complete, completions)
-        
-        self.write("%s%s||%s\n" % (DebugProtocol.ResponseCompletion,
-                                   unicode(list(completions)), text))
-
-    def __getCompletionList(self, text, completer, completions):
-        """
-        Private method to create a completions list.
-        
-        @param text text to complete (string)
-        @param completer completer method
-        @param completions set where to add new completions strings (set)
-        """
-        state = 0
-        try:
-            comp = completer(text, state)
-        except Exception:
-            comp = None
-        while comp is not None:
-            completions.add(comp)
-            state += 1
-            try:
-                comp = completer(text, state)
-            except Exception:
-                comp = None
-
-    def startDebugger(self, filename=None, host=None, port=None,
-                      enableTrace=1, exceptions=1, tracePython=0, redirect=1):
-        """
-        Public method used to start the remote debugger.
-        
-        @param filename the program to be debugged (string)
-        @param host hostname of the debug server (string)
-        @param port portnumber of the debug server (int)
-        @param enableTrace flag to enable the tracing function (boolean)
-        @param exceptions flag to enable exception reporting of the IDE
-            (boolean)
-        @param tracePython flag to enable tracing into the Python library
-            (boolean)
-        @param redirect flag indicating redirection of stdin, stdout and
-            stderr (boolean)
-        """
-        global debugClient
-        if host is None:
-            host = os.getenv('ERICHOST', 'localhost')
-        if port is None:
-            port = os.getenv('ERICPORT', 42424)
-        
-        remoteAddress = self.__resolveHost(host)
-        self.connectDebugger(port, remoteAddress, redirect)
-        if filename is not None:
-            self.running = os.path.abspath(filename)
-        else:
-            try:
-                self.running = os.path.abspath(sys.argv[0])
-            except IndexError:
-                self.running = None
-        if self.running:
-            self.__setCoding(self.running)
-        self.passive = 1
-        self.write("%s%s|%d\n" % (
-            DebugProtocol.PassiveStartup, self.running, exceptions))
-        self.__interact()
-        
-        # setup the debugger variables
-        self._fncache = {}
-        self.dircache = []
-        self.mainFrame = None
-        self.inRawMode = 0
-        self.debugging = 1
-        
-        self.attachThread(mainThread=1)
-        self.mainThread.tracePython = tracePython
-        
-        # set the system exception handling function to ensure, that
-        # we report on all unhandled exceptions
-        sys.excepthook = self.__unhandled_exception
-        self.__interceptSignals()
-        
-        # now start debugging
-        if enableTrace:
-            self.mainThread.set_trace()
-        
-    def startProgInDebugger(self, progargs, wd='', host=None,
-                            port=None, exceptions=1, tracePython=0,
-                            redirect=1):
-        """
-        Public method used to start the remote debugger.
-        
-        @param progargs commandline for the program to be debugged
-            (list of strings)
-        @param wd working directory for the program execution (string)
-        @param host hostname of the debug server (string)
-        @param port portnumber of the debug server (int)
-        @param exceptions flag to enable exception reporting of the IDE
-            (boolean)
-        @param tracePython flag to enable tracing into the Python library
-            (boolean)
-        @param redirect flag indicating redirection of stdin, stdout and
-            stderr (boolean)
-        """
-        if host is None:
-            host = os.getenv('ERICHOST', 'localhost')
-        if port is None:
-            port = os.getenv('ERICPORT', 42424)
-        
-        remoteAddress = self.__resolveHost(host)
-        self.connectDebugger(port, remoteAddress, redirect)
-        
-        self._fncache = {}
-        self.dircache = []
-        sys.argv = progargs[:]
-        sys.argv[0] = os.path.abspath(sys.argv[0])
-        sys.path = self.__getSysPath(os.path.dirname(sys.argv[0]))
-        if wd == '':
-            os.chdir(sys.path[1])
-        else:
-            os.chdir(wd)
-        self.running = sys.argv[0]
-        self.__setCoding(self.running)
-        self.mainFrame = None
-        self.inRawMode = 0
-        self.debugging = 1
-        
-        self.passive = 1
-        self.write("%s%s|%d\n" % (
-            DebugProtocol.PassiveStartup, self.running, exceptions))
-        self.__interact()
-        
-        self.attachThread(mainThread=1)
-        self.mainThread.tracePython = tracePython
-        
-        # set the system exception handling function to ensure, that
-        # we report on all unhandled exceptions
-        sys.excepthook = self.__unhandled_exception
-        self.__interceptSignals()
-        
-        # This will eventually enter a local event loop.
-        # Note the use of repr() on self.running. This is needed on Windows,
-        # where backslash is the path separator. The backslashes would
-        # otherwise be inadvertently stripped away during the eval, causing
-        # IOErrors if self.running were passed as a normal str.
-        self.debugMod.__dict__['__file__'] = self.running
-        sys.modules['__main__'] = self.debugMod
-        res = self.mainThread.run('execfile(' + repr(self.running) + ')',
-                                  self.debugMod.__dict__)
-        self.progTerminated(res)
-
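The effect of repr() mentioned in the comment above can be shown in isolation; the Windows path is a made-up example.

    running = 'C:\\projects\\app.py'      # hypothetical Windows path
    # With a plain str the backslashes would be swallowed when the command
    # string is evaluated; repr() keeps them as escaped backslashes.
    command = 'execfile(' + repr(running) + ')'
    assert command == "execfile('C:\\\\projects\\\\app.py')"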
-    def run_call(self, scriptname, func, *args):
-        """
-        Public method used to start the remote debugger and call a function.
-        
-        @param scriptname name of the script to be debugged (string)
-        @param func function to be called
-        @param *args arguments being passed to func
-        @return result of the function call
-        """
-        self.startDebugger(scriptname, enableTrace=0)
-        res = self.mainThread.runcall(func, *args)
-        self.progTerminated(res)
-        return res
-        
-    def __resolveHost(self, host):
-        """
-        Private method to resolve a hostname to an IP address.
-        
-        @param host hostname of the debug server (string)
-        @return IP address (string)
-        """
-        try:
-            host, version = host.split("@@")
-        except ValueError:
-            version = 'v4'
-        if version == 'v4':
-            family = socket.AF_INET
-        else:
-            family = socket.AF_INET6
-        return socket.getaddrinfo(host, None, family,
-                                  socket.SOCK_STREAM)[0][4][0]
-        
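A brief usage sketch of the 'host@@version' convention handled above; the addresses returned depend on the local resolver, so the values in the comments are only typical.

    import socket

    def resolve(host):
        # An optional '@@v4' / '@@v6' suffix selects the address family;
        # a plain host name defaults to IPv4, mirroring the method above.
        try:
            host, version = host.split("@@")
        except ValueError:
            version = 'v4'
        family = socket.AF_INET if version == 'v4' else socket.AF_INET6
        return socket.getaddrinfo(host, None, family,
                                  socket.SOCK_STREAM)[0][4][0]

    print(resolve("localhost"))          # typically '127.0.0.1'
    # resolve("localhost@@v6")           # typically '::1', if IPv6 is set up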
-    def main(self):
-        """
-        Public method implementing the main method.
-        """
-        if '--' in sys.argv:
-            args = sys.argv[1:]
-            host = None
-            port = None
-            wd = ''
-            tracePython = 0
-            exceptions = 1
-            redirect = 1
-            while args[0]:
-                if args[0] == '-h':
-                    host = args[1]
-                    del args[0]
-                    del args[0]
-                elif args[0] == '-p':
-                    port = int(args[1])
-                    del args[0]
-                    del args[0]
-                elif args[0] == '-w':
-                    wd = args[1]
-                    del args[0]
-                    del args[0]
-                elif args[0] == '-t':
-                    tracePython = 1
-                    del args[0]
-                elif args[0] == '-e':
-                    exceptions = 0
-                    del args[0]
-                elif args[0] == '-n':
-                    redirect = 0
-                    del args[0]
-                elif args[0] == '--no-encoding':
-                    self.noencoding = True
-                    del args[0]
-                elif args[0] == '--fork-child':
-                    self.fork_auto = True
-                    self.fork_child = True
-                    del args[0]
-                elif args[0] == '--fork-parent':
-                    self.fork_auto = True
-                    self.fork_child = False
-                    del args[0]
-                elif args[0] == '--':
-                    del args[0]
-                    break
-                else:   # unknown option
-                    del args[0]
-            if not args:
-                print "No program given. Aborting!"     # __IGNORE_WARNING__
-            else:
-                if not self.noencoding:
-                    self.__coding = self.defaultCoding
-                self.startProgInDebugger(args, wd, host, port,
-                                         exceptions=exceptions,
-                                         tracePython=tracePython,
-                                         redirect=redirect)
-        else:
-            if sys.argv[1] == '--no-encoding':
-                self.noencoding = True
-                del sys.argv[1]
-            if sys.argv[1] == '':
-                del sys.argv[1]
-            try:
-                port = int(sys.argv[1])
-            except (ValueError, IndexError):
-                port = -1
-            try:
-                redirect = int(sys.argv[2])
-            except (ValueError, IndexError):
-                redirect = 1
-            try:
-                ipOrHost = sys.argv[3]
-                if ':' in ipOrHost:
-                    remoteAddress = ipOrHost
-                elif ipOrHost[0] in '0123456789':
-                    remoteAddress = ipOrHost
-                else:
-                    remoteAddress = self.__resolveHost(ipOrHost)
-            except Exception:
-                remoteAddress = None
-            sys.argv = ['']
-            if '' not in sys.path:
-                sys.path.insert(0, '')
-            if port >= 0:
-                if not self.noencoding:
-                    self.__coding = self.defaultCoding
-                self.connectDebugger(port, remoteAddress, redirect)
-                self.__interact()
-            else:
-                print "No network port given. Aborting..."  # __IGNORE_WARNING__
-        
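For illustration, the two invocation forms parsed above might look like this on a command line; the module name, host, port and script are placeholders.

    # Form 1 ('--' present, passive debugging): options first, then '--',
    # then the program to debug and its arguments; -h/-p default to the
    # ERICHOST/ERICPORT environment variables.
    #
    #     python DebugClientThreads.py -h idehost -p 42424 -w /work/dir -- prog.py arg1
    #
    # Form 2 (no '--'): port, redirect flag and IDE address are taken
    # positionally from sys.argv.
    #
    #     python DebugClientThreads.py 42424 1 127.0.0.1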
-    def fork(self):
-        """
-        Public method implementing a fork routine deciding which branch to
-        follow.
-        
-        @return process ID (integer)
-        """
-        if not self.fork_auto:
-            self.write(DebugProtocol.RequestForkTo + '\n')
-            self.eventLoop(True)
-        pid = DebugClientOrigFork()
-        if pid == 0:
-            # child
-            if not self.fork_child:
-                sys.settrace(None)
-                sys.setprofile(None)
-                self.sessionClose(0)
-        else:
-            # parent
-            if self.fork_child:
-                sys.settrace(None)
-                sys.setprofile(None)
-                self.sessionClose(0)
-        return pid
-        
-    def close(self, fd):
-        """
-        Public method implementing a close method as a replacement for
-        os.close().
-        
-        It prevents the debugger connections from being closed.
-        
-        @param fd file descriptor to be closed (integer)
-        """
-        if fd in [self.readstream.fileno(), self.writestream.fileno(),
-                  self.errorstream.fileno()]:
-            return
-        
-        DebugClientOrigClose(fd)
-        
-    def __getSysPath(self, firstEntry):
-        """
-        Private slot to calculate a path list including the PYTHONPATH
-        environment variable.
-        
-        @param firstEntry entry to be put first in sys.path (string)
-        @return path list for use as sys.path (list of strings)
-        """
-        sysPath = [path for path in os.environ.get("PYTHONPATH", "")
-                   .split(os.pathsep)
-                   if path not in sys.path] + sys.path[:]
-        if "" in sysPath:
-            sysPath.remove("")
-        sysPath.insert(0, firstEntry)
-        sysPath.insert(0, '')
-        return sysPath
-
-#
-# eflag: FileType = Python2
-# eflag: noqa = M601, M702
--- a/DebugClients/Python/DebugClientCapabilities.py	Fri Sep 02 19:08:02 2016 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,23 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright (c) 2005 - 2016 Detlev Offenbach <detlev@die-offenbachs.de>
-#
-
-"""
-Module defining the debug clients capabilities.
-"""
-
-HasDebugger = 0x0001
-HasInterpreter = 0x0002
-HasProfiler = 0x0004
-HasCoverage = 0x0008
-HasCompleter = 0x0010
-HasUnittest = 0x0020
-HasShell = 0x0040
-
-HasAll = HasDebugger | HasInterpreter | HasProfiler | \
-    HasCoverage | HasCompleter | HasUnittest | HasShell
-
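A small sketch of how these capability bits would typically be combined and tested; the client value here is hypothetical.

    HasDebugger = 0x0001
    HasCompleter = 0x0010
    HasShell = 0x0040

    clientCapabilities = HasDebugger | HasShell    # hypothetical client
    supportsCompletion = bool(clientCapabilities & HasCompleter)
    assert supportsCompletion is False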
-#
-# eflag: FileType = Python2
-# eflag: noqa = M601, M702
--- a/DebugClients/Python/DebugClientThreads.py	Fri Sep 02 19:08:02 2016 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,203 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright (c) 2003 - 2016 Detlev Offenbach <detlev@die-offenbachs.de>
-#
-
-"""
-Module implementing the multithreaded version of the debug client.
-"""
-
-import thread
-import sys
-
-from AsyncIO import AsyncIO
-from DebugThread import DebugThread
-import DebugClientBase
-
-
-def _debugclient_start_new_thread(target, args, kwargs={}):
-    """
-    Module function used to allow for debugging of multiple threads.
-    
-    The way it works is that below, we reset thread.start_new_thread to
-    this function object, thus providing a hook for us to see when
-    threads are started. From here we forward the request on to the
-    DebugClient, which creates a DebugThread object to allow tracing of
-    the thread and then starts the thread up. These actions are always
-    performed in order to allow dropping into debug mode.
-    
-    See DebugClientThreads.attachThread and DebugThread.DebugThread in
-    DebugThread.py
-    
-    @param target the start function of the target thread (i.e. the user code)
-    @param args arguments to pass to target
-    @param kwargs keyword arguments to pass to target
-    @return The identifier of the created thread
-    """
-    if DebugClientBase.DebugClientInstance is not None:
-        return DebugClientBase.DebugClientInstance.attachThread(
-            target, args, kwargs)
-    else:
-        return _original_start_thread(target, args, kwargs)
-    
-# make thread hooks available to system
-_original_start_thread = thread.start_new_thread
-thread.start_new_thread = _debugclient_start_new_thread
-
-# Note: import threading here AFTER above hook, as threading caches
-#       thread.start_new_thread.
-from threading import RLock
-
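The hook described above, reduced to a minimal standalone sketch (Python 2 only, since it uses the old thread module); the wrapper simply records thread starts instead of attaching a debugger.

    import thread

    started = []
    _saved_start_new_thread = thread.start_new_thread

    def _hooked_start_new_thread(target, args, kwargs={}):
        # See every thread start, then delegate to the saved original.
        started.append(target)
        return _saved_start_new_thread(target, args, kwargs)

    thread.start_new_thread = _hooked_start_new_thread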
-
-class DebugClientThreads(DebugClientBase.DebugClientBase, AsyncIO):
-    """
-    Class implementing the client side of the debugger.
-
-    This variant of the debugger implements a threaded debugger client
-    by subclassing all relevant base classes.
-    """
-    def __init__(self):
-        """
-        Constructor
-        """
-        AsyncIO.__init__(self)
-        
-        DebugClientBase.DebugClientBase.__init__(self)
-        
-        # protection lock for synchronization
-        self.clientLock = RLock()
-        
-        # the "current" thread, basically the thread we are at a breakpoint
-        # for.
-        self.currentThread = None
-        
-        # special objects representing the main scripts thread and frame
-        self.mainThread = None
-        self.mainFrame = None
-        
-        self.variant = 'Threaded'
-
-    def attachThread(self, target=None, args=None, kwargs=None, mainThread=0):
-        """
-        Public method to set up a thread for DebugClient to debug.
-        
-        If mainThread is non-zero, then we are attaching to the already
-        started main thread of the app and the rest of the args are ignored.
-        
-        @param target the start function of the target thread (i.e. the
-            user code)
-        @param args arguments to pass to target
-        @param kwargs keyword arguments to pass to target
-        @param mainThread non-zero, if we are attaching to the already
-            started main thread of the app
-        @return The identifier of the created thread
-        """
-        try:
-            self.lockClient()
-            newThread = DebugThread(self, target, args, kwargs, mainThread)
-            ident = -1
-            if mainThread:
-                ident = thread.get_ident()
-                self.mainThread = newThread
-                if self.debugging:
-                    sys.setprofile(newThread.profile)
-            else:
-                ident = _original_start_thread(newThread.bootstrap, ())
-                if self.mainThread is not None:
-                    self.tracePython = self.mainThread.tracePython
-            newThread.set_ident(ident)
-            self.threads[newThread.get_ident()] = newThread
-        finally:
-            self.unlockClient()
-        return ident
-    
-    def threadTerminated(self, dbgThread):
-        """
-        Public method called when a DebugThread has exited.
-        
-        @param dbgThread the DebugThread that has exited
-        """
-        try:
-            self.lockClient()
-            try:
-                del self.threads[dbgThread.get_ident()]
-            except KeyError:
-                pass
-        finally:
-            self.unlockClient()
-            
-    def lockClient(self, blocking=1):
-        """
-        Public method to acquire the lock for this client.
-        
-        @param blocking flag indicating a blocking lock
-        @return flag indicating successful locking
-        """
-        if blocking:
-            self.clientLock.acquire()
-        else:
-            return self.clientLock.acquire(blocking)
-        
-    def unlockClient(self):
-        """
-        Public method to release the lock for this client.
-        """
-        try:
-            self.clientLock.release()
-        except AssertionError:
-            pass
-        
-    def setCurrentThread(self, id):
-        """
-        Public method to set the current thread.
-
-        @param id the id the current thread should be set to.
-        """
-        try:
-            self.lockClient()
-            if id is None:
-                self.currentThread = None
-            else:
-                self.currentThread = self.threads[id]
-        finally:
-            self.unlockClient()
-    
-    def eventLoop(self, disablePolling=False):
-        """
-        Public method implementing our event loop.
-        
-        @param disablePolling flag indicating to enter an event loop with
-            polling disabled (boolean)
-        """
-        # make sure we set the current thread appropriately
-        threadid = thread.get_ident()
-        self.setCurrentThread(threadid)
-        
-        DebugClientBase.DebugClientBase.eventLoop(self, disablePolling)
-        
-        self.setCurrentThread(None)
-
-    def set_quit(self):
-        """
-        Public method to do a 'set quit' on all threads.
-        """
-        try:
-            locked = self.lockClient(0)
-            try:
-                for key in self.threads.keys():
-                    self.threads[key].set_quit()
-            except Exception:
-                pass
-        finally:
-            if locked:
-                self.unlockClient()
-
-# We are normally called by the debugger to execute directly.
-
-if __name__ == '__main__':
-    debugClient = DebugClientThreads()
-    debugClient.main()
-
-#
-# eflag: FileType = Python2
-# eflag: noqa = M601, M702, E402
--- a/DebugClients/Python/DebugConfig.py	Fri Sep 02 19:08:02 2016 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,23 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright (c) 2005 - 2016 Detlev Offenbach <detlev@die-offenbachs.de>
-#
-
-"""
-Module defining type strings for the different Python types.
-"""
-
-ConfigVarTypeStrings = [
-    '__', 'NoneType', 'type',
-    'bool', 'int', 'long', 'float', 'complex',
-    'str', 'unicode', 'tuple', 'list',
-    'dict', 'dict-proxy', 'set', 'file', 'xrange',
-    'slice', 'buffer', 'class', 'instance',
-    'instance method', 'property', 'generator',
-    'function', 'builtin_function_or_method', 'code', 'module',
-    'ellipsis', 'traceback', 'frame', 'other'
-]
-
-#
-# eflag: FileType = Python2
-# eflag: noqa = M601, M702
--- a/DebugClients/Python/DebugProtocol.py	Fri Sep 02 19:08:02 2016 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,88 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright (c) 2002 - 2016 Detlev Offenbach <detlev@die-offenbachs.de>
-#
-
-"""
-Module defining the debug protocol tokens.
-"""
-
-# The address used for debugger/client communications.
-DebugAddress = '127.0.0.1'
-
-# The protocol "words".
-RequestOK = '>OK?<'
-RequestEnv = '>Environment<'
-RequestCapabilities = '>Capabilities<'
-RequestLoad = '>Load<'
-RequestRun = '>Run<'
-RequestCoverage = '>Coverage<'
-RequestProfile = '>Profile<'
-RequestContinue = '>Continue<'
-RequestStep = '>Step<'
-RequestStepOver = '>StepOver<'
-RequestStepOut = '>StepOut<'
-RequestStepQuit = '>StepQuit<'
-RequestBreak = '>Break<'
-RequestBreakEnable = '>EnableBreak<'
-RequestBreakIgnore = '>IgnoreBreak<'
-RequestWatch = '>Watch<'
-RequestWatchEnable = '>EnableWatch<'
-RequestWatchIgnore = '>IgnoreWatch<'
-RequestVariables = '>Variables<'
-RequestVariable = '>Variable<'
-RequestSetFilter = '>SetFilter<'
-RequestThreadList = '>ThreadList<'
-RequestThreadSet = '>ThreadSet<'
-RequestEval = '>Eval<'
-RequestExec = '>Exec<'
-RequestShutdown = '>Shutdown<'
-RequestBanner = '>Banner<'
-RequestCompletion = '>Completion<'
-RequestUTPrepare = '>UTPrepare<'
-RequestUTRun = '>UTRun<'
-RequestUTStop = '>UTStop<'
-RequestForkTo = '>ForkTo<'
-RequestForkMode = '>ForkMode<'
-
-ResponseOK = '>OK<'
-ResponseCapabilities = RequestCapabilities
-ResponseContinue = '>Continue<'
-ResponseException = '>Exception<'
-ResponseSyntax = '>SyntaxError<'
-ResponseSignal = '>Signal<'
-ResponseExit = '>Exit<'
-ResponseLine = '>Line<'
-ResponseRaw = '>Raw<'
-ResponseClearBreak = '>ClearBreak<'
-ResponseBPConditionError = '>BPConditionError<'
-ResponseClearWatch = '>ClearWatch<'
-ResponseWPConditionError = '>WPConditionError<'
-ResponseVariables = RequestVariables
-ResponseVariable = RequestVariable
-ResponseThreadList = RequestThreadList
-ResponseThreadSet = RequestThreadSet
-ResponseStack = '>CurrentStack<'
-ResponseBanner = RequestBanner
-ResponseCompletion = RequestCompletion
-ResponseUTPrepared = '>UTPrepared<'
-ResponseUTStartTest = '>UTStartTest<'
-ResponseUTStopTest = '>UTStopTest<'
-ResponseUTTestFailed = '>UTTestFailed<'
-ResponseUTTestErrored = '>UTTestErrored<'
-ResponseUTTestSkipped = '>UTTestSkipped<'
-ResponseUTTestFailedExpected = '>UTTestFailedExpected<'
-ResponseUTTestSucceededUnexpected = '>UTTestSucceededUnexpected<'
-ResponseUTFinished = '>UTFinished<'
-ResponseForkTo = RequestForkTo
-
-PassiveStartup = '>PassiveStartup<'
-
-RequestCallTrace = '>CallTrace<'
-CallTrace = '>CallTrace<'
-
-EOT = '>EOT<\n'
-
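A hedged sketch of one line of this line-oriented protocol, using the PassiveStartup token exactly as the debug client writes it above; the script path is a placeholder, and EOT presumably marks the end of a transmission unit.

    PassiveStartup = '>PassiveStartup<'
    EOT = '>EOT<\n'

    running = '/home/user/prog.py'       # hypothetical script being debugged
    exceptions = 1
    line = "%s%s|%d\n" % (PassiveStartup, running, exceptions)
    assert line == '>PassiveStartup</home/user/prog.py|1\n'
    framed = line + EOT                  # terminated with the EOT marker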
-#
-# eflag: FileType = Python2
-# eflag: noqa = M601, M702
--- a/DebugClients/Python/DebugThread.py	Fri Sep 02 19:08:02 2016 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,134 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright (c) 2002 - 2016 Detlev Offenbach <detlev@die-offenbachs.de>
-#
-
-"""
-Module implementing the debug thread.
-"""
-
-import bdb
-import sys
-
-from DebugBase import DebugBase
-
-
-class DebugThread(DebugBase):
-    """
-    Class implementing a debug thread.
-
-    It represents a thread in the python interpreter that we are tracing.
-    
-    Provides simple wrapper methods around bdb for the 'owning' client to
-    call to step etc.
-    """
-    def __init__(self, dbgClient, targ=None, args=None, kwargs=None,
-                 mainThread=0):
-        """
-        Constructor
-        
-        @param dbgClient the owning client
-        @param targ the target method in the run thread
-        @param args arguments to be passed to the thread
-        @param kwargs keyword arguments to be passed to the thread
-        @param mainThread 0 if this thread is not the main script's thread
-        """
-        DebugBase.__init__(self, dbgClient)
-        
-        self._target = targ
-        self._args = args
-        self._kwargs = kwargs
-        self._mainThread = mainThread
-        # thread running tracks execution state of client code
-        # it will always be 0 for main thread as that is tracked
-        # by DebugClientThreads and Bdb...
-        self._threadRunning = 0
-        
-        self.__ident = None  # id of this thread.
-        self.__name = ""
-        self.tracePython = False
-    
-    def set_ident(self, id):
-        """
-        Public method to set the id for this thread.
-        
-        @param id id for this thread (int)
-        """
-        self.__ident = id
-    
-    def get_ident(self):
-        """
-        Public method to return the id of this thread.
-        
-        @return the id of this thread (int)
-        """
-        return self.__ident
-    
-    def get_name(self):
-        """
-        Public method to return the name of this thread.
-        
-        @return name of this thread (string)
-        """
-        return self.__name
-    
-    def traceThread(self):
-        """
-        Public method to setup tracing for this thread.
-        """
-        self.set_trace()
-        if not self._mainThread:
-            self.set_continue(0)
-    
-    def bootstrap(self):
-        """
-        Public method to bootstrap the thread.
-        
-        It wraps the call to the user function to enable tracing
-        beforehand.
-        """
-        try:
-            try:
-                self._threadRunning = 1
-                self.traceThread()
-                self._target(*self._args, **self._kwargs)
-            except bdb.BdbQuit:
-                pass
-        finally:
-            self._threadRunning = 0
-            self.quitting = 1
-            self._dbgClient.threadTerminated(self)
-            sys.settrace(None)
-            sys.setprofile(None)
-    
-    def trace_dispatch(self, frame, event, arg):
-        """
-        Public method wrapping the trace_dispatch of bdb.py.
-        
-        It wraps the call to dispatch tracing into
-        bdb to make sure we have locked the client to prevent multiple
-        threads from entering the client event loop.
-        
-        @param frame The current stack frame.
-        @param event The trace event (string)
-        @param arg The arguments
-        @return local trace function
-        """
-        try:
-            self._dbgClient.lockClient()
-            # if this thread came out of a lock, and we are quitting
-            # and we are still running, then get rid of tracing for this thread
-            if self.quitting and self._threadRunning:
-                sys.settrace(None)
-                sys.setprofile(None)
-            import threading
-            self.__name = threading.currentThread().getName()
-            retval = DebugBase.trace_dispatch(self, frame, event, arg)
-        finally:
-            self._dbgClient.unlockClient()
-        
-        return retval
-
-#
-# eflag: FileType = Python2
-# eflag: noqa = M601, M702
--- a/DebugClients/Python/FlexCompleter.py	Fri Sep 02 19:08:02 2016 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,275 +0,0 @@
-# -*- coding: utf-8 -*-
-
-"""
-Word completion for the eric6 shell.
-
-<h4>NOTE for eric6 variant</h4>
-
-    This version is a re-implementation of FlexCompleter
-    as found in the PyQwt package. It is modified to work with the eric6 debug
-    clients.
-
-
-<h4>NOTE for the PyQwt variant</h4>
-
-    This version is a re-implementation of FlexCompleter
-    with readline support for PyQt&sip-3.6 and earlier.
-
-    Full readline support is present in PyQt&sip-snapshot-20030531 and later.
-
-
-<h4>NOTE for FlexCompleter</h4>
-
-    This version is a re-implementation of rlcompleter with
-    selectable namespace.
-
-    The problem with rlcompleter is that it's hardwired to work with
-    __main__.__dict__, and in some cases one may have 'sandboxed' namespaces.
-    So this class is a ripoff of rlcompleter, with the namespace to work in as
-    an optional parameter.
-    
-    This class can be used just like rlcompleter, but the Completer class now
-    has a constructor with the optional 'namespace' parameter.
-    
-    A patch has been submitted to Python@sourceforge for these changes to go in
-    the standard Python distribution.
-
-
-<h4>Original rlcompleter documentation</h4>
-
-    This requires the latest extension to the readline module. The
-    completer completes keywords, built-ins and globals in __main__; when
-    completing NAME.NAME..., it evaluates (!) the expression up to the last
-    dot and completes its attributes.
-    
-    It's very cool to do "import string", type "string.", hit the
-    completion key (twice), and see the list of names defined by the
-    string module!
-    
-    Tip: to use the tab key as the completion key, call
-    
-    'readline.parse_and_bind("tab: complete")'
-    
-    <b>Notes</b>:
-    <ul>
-    <li>
-    Exceptions raised by the completer function are *ignored* (and
-    generally cause the completion to fail).  This is a feature -- since
-    readline sets the tty device in raw (or cbreak) mode, printing a
-    traceback wouldn't work well without some complicated hoopla to save,
-    reset and restore the tty state.
-    </li>
-    <li>
-    The evaluation of the NAME.NAME... form may cause arbitrary
-    application defined code to be executed if an object with a
-    __getattr__ hook is found.  Since it is the responsibility of the
-    application (or the user) to enable this feature, I consider this an
-    acceptable risk.  More complicated expressions (e.g. function calls or
-    indexing operations) are *not* evaluated.
-    </li>
-    <li>
-    GNU readline is also used by the built-in functions input() and
-    raw_input(), and thus these also benefit/suffer from the completer
-    features.  Clearly an interactive application can benefit by
-    specifying its own completer function and using raw_input() for all
-    its input.
-    </li>
-    <li>
-    When the original stdin is not a tty device, GNU readline is never
-    used, and this module (and the readline module) are silently inactive.
-    </li>
-    </ul>
-"""
-
-#*****************************************************************************
-#
-# Since this file is essentially a minimally modified copy of the rlcompleter
-# module which is part of the standard Python distribution, I assume that the
-# proper procedure is to maintain its copyright as belonging to the Python
-# Software Foundation:
-#
-#       Copyright (C) 2001 Python Software Foundation, www.python.org
-#
-#  Distributed under the terms of the Python Software Foundation license.
-#
-#  Full text available at:
-#
-#                  http://www.python.org/2.1/license.html
-#
-#*****************************************************************************
-
-import __builtin__
-import __main__
-
-__all__ = ["Completer"]
-
-
-class Completer(object):
-    """
-    Class implementing the command line completer object.
-    """
-    def __init__(self, namespace=None):
-        """
-        Constructor
-
-        Completer([namespace]) -> completer instance.
-
-        If unspecified, the default namespace where completions are performed
-        is __main__ (technically, __main__.__dict__). Namespaces should be
-        given as dictionaries.
-
-        Completer instances should be used as the completion mechanism of
-        readline via the set_completer() call:
-
-        readline.set_completer(Completer(my_namespace).complete)
-        
-        @param namespace namespace for the completer
-        @exception TypeError raised to indicate a wrong namespace structure
-        """
-        if namespace and not isinstance(namespace, dict):
-            raise TypeError('namespace must be a dictionary')
-
-        # Don't bind to namespace quite yet, but flag whether the user wants a
-        # specific namespace or to use __main__.__dict__. This will allow us
-        # to bind to __main__.__dict__ at completion time, not now.
-        if namespace is None:
-            self.use_main_ns = 1
-        else:
-            self.use_main_ns = 0
-            self.namespace = namespace
-
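Following the constructor docstring, a minimal usage sketch with a sandboxed namespace; readline is only available where the platform provides it, and the namespace contents are invented.

    import readline

    namespace = {'answer': 42, 'angle': 1.57}    # hypothetical sandbox
    readline.set_completer(Completer(namespace).complete)
    readline.parse_and_bind("tab: complete")
    # Typing 'an' and hitting Tab twice now offers 'answer' and 'angle'.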
-    def complete(self, text, state):
-        """
-        Public method to return the next possible completion for 'text'.
-
-        This is called successively with state == 0, 1, 2, ... until it
-        returns None.  The completion should begin with 'text'.
-        
-        @param text The text to be completed. (string)
-        @param state The state of the completion. (integer)
-        @return The possible completions as a list of strings.
-        """
-        if self.use_main_ns:
-            self.namespace = __main__.__dict__
-            
-        if state == 0:
-            if "." in text:
-                self.matches = self.attr_matches(text)
-            else:
-                self.matches = self.global_matches(text)
-        try:
-            return self.matches[state]
-        except IndexError:
-            return None
-
-    def _callable_postfix(self, val, word):
-        """
-        Protected method to check for a callable.
-        
-        @param val value to check (object)
-        @param word word to amend (string)
-        @return amended word (string)
-        """
-        if hasattr(val, '__call__'):
-            word = word + "("
-        return word
-
-    def global_matches(self, text):
-        """
-        Public method to compute matches when text is a simple name.
-
-        @param text The text to be completed. (string)
-        @return A list of all keywords, built-in functions and names currently
-        defined in self.namespace that match.
-        """
-        import keyword
-        matches = []
-        n = len(text)
-        for word in keyword.kwlist:
-            if word[:n] == text:
-                matches.append(word)
-        for nspace in [__builtin__.__dict__, self.namespace]:
-            for word, val in nspace.items():
-                if word[:n] == text and word != "__builtins__":
-                    matches.append(self._callable_postfix(val, word))
-        return matches
-
-    def attr_matches(self, text):
-        """
-        Public method to compute matches when text contains a dot.
-
-        Assuming the text is of the form NAME.NAME....[NAME], and is
-        evaluatable in self.namespace, it will be evaluated and its attributes
-        (as revealed by dir()) are used as possible completions.  (For class
-        instances, class members are also considered.)
-
-        <b>WARNING</b>: this can still invoke arbitrary C code, if an object
-        with a __getattr__ hook is evaluated.
-        
-        @param text The text to be completed. (string)
-        @return A list of all matches.
-        """
-        import re
-
-        # Testing. This is the original code:
-        # m = re.match(r"(\w+(\.\w+)*)\.(\w*)", text)
-
-        # Modified to catch [] in expressions:
-        # m = re.match(r"([\w\[\]]+(\.[\w\[\]]+)*)\.(\w*)", text)
-
-        # Another option, seems to work great. Catches things like ''.<tab>
-        m = re.match(r"(\S+(\.\w+)*)\.(\w*)", text)
-
-        if not m:
-            return
-        expr, attr = m.group(1, 3)
-        try:
-            thisobject = eval(expr, self.namespace)
-        except Exception:
-            return []
-
-        # get the content of the object, except __builtins__
-        words = dir(thisobject)
-        if "__builtins__" in words:
-            words.remove("__builtins__")
-
-        if hasattr(thisobject, '__class__'):
-            words.append('__class__')
-            words = words + get_class_members(thisobject.__class__)
-        matches = []
-        n = len(attr)
-        for word in words:
-            try:
-                if word[:n] == attr and hasattr(thisobject, word):
-                    val = getattr(thisobject, word)
-                    word = self._callable_postfix(
-                        val, "%s.%s" % (expr, word))
-                    matches.append(word)
-            except Exception:
-                # some badly behaved objects pollute dir() with non-strings,
-                # which cause the completion to fail.  This way we skip the
-                # bad entries and can still continue processing the others.
-                pass
-        return matches
-
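For illustration, the completer can also be driven directly, outside readline; the exact match list varies with the Python version, but callables come back with a '(' appended by _callable_postfix().

    completer = Completer({'text': 'hello'})
    matches = completer.attr_matches('text.up')
    assert 'text.upper(' in matches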
-
-def get_class_members(klass):
-    """
-    Module function to retrieve the class members.
-    
-    @param klass The class object to be analysed.
-    @return A list of all names defined in the class.
-    """
-    # PyQwt's hack for PyQt&sip-3.6 and earlier
-    if hasattr(klass, 'getLazyNames'):
-        return klass.getLazyNames()
-    # vanilla Python stuff
-    ret = dir(klass)
-    if hasattr(klass, '__bases__'):
-        for base in klass.__bases__:
-            ret = ret + get_class_members(base)
-    return ret
-
-#
-# eflag: FileType = Python2
-# eflag: noqa = M601, M702, M111
--- a/DebugClients/Python/PyProfile.py	Fri Sep 02 19:08:02 2016 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,172 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright (c) 2002 - 2016 Detlev Offenbach <detlev@die-offenbachs.de>
-
-"""
-Module defining additions to the standard Python profile.py.
-"""
-
-import os
-import marshal
-import profile
-import atexit
-import pickle
-
-
-class PyProfile(profile.Profile):
-    """
-    Class extending the standard Python profiler with additional methods.
-    
-    This class extends the standard Python profiler by the functionality to
-    save the collected timing data in a timing cache, to restore these data
-    on subsequent calls, to store a profile dump to a standard filename and
-    to erase these caches.
-    """
-    def __init__(self, basename, timer=None, bias=None):
-        """
-        Constructor
-        
-        @param basename name of the script to be profiled (string)
-        @param timer function defining the timing calculation
-        @param bias calibration value (float)
-        """
-        try:
-            profile.Profile.__init__(self, timer, bias)
-        except TypeError:
-            profile.Profile.__init__(self, timer)
-        
-        self.dispatch = self.__class__.dispatch
-        
-        basename = os.path.splitext(basename)[0]
-        self.profileCache = "%s.profile" % basename
-        self.timingCache = "%s.timings" % basename
-        
-        self.__restore()
-        atexit.register(self.save)
-        
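A hedged usage sketch; the script name is invented, and the caches end up next to it as '<basename>.profile' and '<basename>.timings' as described above.

    prof = PyProfile('/home/user/script.py')     # hypothetical script name
    prof.runcall(sorted, [3, 1, 2])              # profile any callable
    prof.save()                                  # writes the two cache files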
-    def __restore(self):
-        """
-        Private method to restore the timing data from the timing cache.
-        """
-        if not os.path.exists(self.timingCache):
-            return
-            
-        try:
-            cache = open(self.timingCache, 'rb')
-            timings = marshal.load(cache)
-            cache.close()
-            if isinstance(timings, dict):
-                self.timings = timings
-        except Exception:
-            pass
-        
-    def save(self):
-        """
-        Public method to store the collected profile data.
-        """
-        # dump the raw timing data
-        cache = open(self.timingCache, 'wb')
-        marshal.dump(self.timings, cache)
-        cache.close()
-        
-        # dump the profile data
-        self.dump_stats(self.profileCache)
-        
-    def dump_stats(self, file):
-        """
-        Public method to dump the statistics data.
-        
-        @param file name of the file to write to (string)
-        """
-        f = None
-        try:
-            f = open(file, 'wb')
-            self.create_stats()
-            pickle.dump(self.stats, f, 2)
-        except (EnvironmentError, pickle.PickleError):
-            pass
-        finally:
-            if f is not None:
-                f.close()
-
-    def erase(self):
-        """
-        Public method to erase the collected timing data.
-        """
-        self.timings = {}
-        if os.path.exists(self.timingCache):
-            os.remove(self.timingCache)
-
-    def fix_frame_filename(self, frame):
-        """
-        Public method used to fixup the filename for a given frame.
-        
-        The logic employed here is that if a module was loaded
-        from a .pyc file, then the correct .py to operate with
-        should be in the same path as the .pyc. The reason this
-        logic is needed is that when a .pyc file is generated, the
-        filename embedded in it (and thus what is readable in the frame's
-        code object) is the fully qualified filepath at the time the
-        .pyc was generated. If files are moved from machine to machine
-        this can break debugging as the .pyc will refer to the .py
-        on the original machine. Another case might be sharing
-        code over a network... This logic deals with that.
-        
-        @param frame the frame object
-        @return fixed up file name (string)
-        """
-        # get module name from __file__
-        if not isinstance(frame, profile.Profile.fake_frame) and \
-                '__file__' in frame.f_globals:
-            root, ext = os.path.splitext(frame.f_globals['__file__'])
-            if ext == '.pyc' or ext == '.py':
-                fixedName = root + '.py'
-                if os.path.exists(fixedName):
-                    return fixedName
-
-        return frame.f_code.co_filename
-
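The fix-up itself is just a splitext plus an existence check; a minimal sketch with an invented module path:

    import os

    embedded = '/original/machine/pkg/mod.pyc'   # path recorded in the .pyc
    root, ext = os.path.splitext(embedded)
    if ext in ('.pyc', '.py'):
        fixedName = root + '.py'
        if os.path.exists(fixedName):
            embedded = fixedName                 # prefer the local .py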
-    def trace_dispatch_call(self, frame, t):
-        """
-        Public method used to trace functions calls.
-        
-        This is a variant of the one found in the standard Python
-        profile.py calling fix_frame_filename above.
-        
-        @param frame reference to the call frame
-        @param t arguments of the call
-        @return flag indicating a handled call
-        """
-        if self.cur and frame.f_back is not self.cur[-2]:
-            rpt, rit, ret, rfn, rframe, rcur = self.cur
-            if not isinstance(rframe, profile.Profile.fake_frame):
-                assert rframe.f_back is frame.f_back, ("Bad call", rfn,
-                                                       rframe, rframe.f_back,
-                                                       frame, frame.f_back)
-                self.trace_dispatch_return(rframe, 0)
-                assert (self.cur is None or
-                        frame.f_back is self.cur[-2]), ("Bad call",
-                                                        self.cur[-3])
-        fcode = frame.f_code
-        fn = (self.fix_frame_filename(frame),
-              fcode.co_firstlineno, fcode.co_name)
-        self.cur = (t, 0, 0, fn, frame, self.cur)
-        timings = self.timings
-        if fn in timings:
-            cc, ns, tt, ct, callers = timings[fn]
-            timings[fn] = cc, ns + 1, tt, ct, callers
-        else:
-            timings[fn] = 0, 0, 0, 0, {}
-        return 1
-    
-    dispatch = {
-        "call": trace_dispatch_call,
-        "exception": profile.Profile.trace_dispatch_exception,
-        "return": profile.Profile.trace_dispatch_return,
-        "c_call": profile.Profile.trace_dispatch_c_call,
-        "c_exception": profile.Profile.trace_dispatch_return,
-        # the C function returned
-        "c_return": profile.Profile.trace_dispatch_return,
-    }
-
-#
-# eflag: FileType = Python2
-# eflag: noqa = M601, M702
--- a/DebugClients/Python/__init__.py	Fri Sep 02 19:08:02 2016 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,13 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright (c) 2005 - 2016 Detlev Offenbach <detlev@die-offenbachs.de>
-#
-
-"""
-Package implementing the Python debugger.
-
-It consists of different kinds of debug clients.
-"""
-
-#
-# eflag: FileType = Python2
--- a/DebugClients/Python/coverage/__init__.py	Fri Sep 02 19:08:02 2016 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,38 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
-
-"""Code coverage measurement for Python.
-
-Ned Batchelder
-http://nedbatchelder.com/code/coverage
-
-"""
-
-from coverage.version import __version__, __url__, version_info
-
-from coverage.control import Coverage, process_startup
-from coverage.data import CoverageData
-from coverage.misc import CoverageException
-from coverage.plugin import CoveragePlugin, FileTracer, FileReporter
-from coverage.pytracer import PyTracer
-
-# Backward compatibility.
-coverage = Coverage
-
-# On Windows, we encode and decode deep enough that something goes wrong and
-# the encodings.utf_8 module is loaded and then unloaded, I don't know why.
-# Adding a reference here prevents it from being unloaded.  Yuk.
-import encodings.utf_8
-
-# Because of the "from coverage.control import fooey" lines at the top of the
-# file, there's an entry for coverage.coverage in sys.modules, mapped to None.
-# This makes some inspection tools (like pydoc) unable to find the class
-# coverage.coverage.  So remove that entry.
-import sys
-try:
-    del sys.modules['coverage.coverage']
-except KeyError:
-    pass
-
-#
-# eflag: FileType = Python2
--- a/DebugClients/Python/coverage/__main__.py	Fri Sep 02 19:08:02 2016 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,11 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
-
-"""Coverage.py's main entry point."""
-
-import sys
-from coverage.cmdline import main
-sys.exit(main())
-
-#
-# eflag: FileType = Python2
--- a/DebugClients/Python/coverage/annotate.py	Fri Sep 02 19:08:02 2016 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,106 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
-
-"""Source file annotation for coverage.py."""
-
-import io
-import os
-import re
-
-from coverage.files import flat_rootname
-from coverage.misc import isolate_module
-from coverage.report import Reporter
-
-os = isolate_module(os)
-
-
-class AnnotateReporter(Reporter):
-    """Generate annotated source files showing line coverage.
-
-    This reporter creates annotated copies of the measured source files. Each
-    .py file is copied as a .py,cover file, with a left-hand margin annotating
-    each line::
-
-        > def h(x):
-        -     if 0:   #pragma: no cover
-        -         pass
-        >     if x == 1:
-        !         a = 1
-        >     else:
-        >         a = 2
-
-        > h(2)
-
-    Executed lines use '>', lines not executed use '!', lines excluded from
-    consideration use '-'.
-
-    """
-
-    def __init__(self, coverage, config):
-        super(AnnotateReporter, self).__init__(coverage, config)
-        self.directory = None
-
-    blank_re = re.compile(r"\s*(#|$)")
-    else_re = re.compile(r"\s*else\s*:\s*(#|$)")
-
-    def report(self, morfs, directory=None):
-        """Run the report.
-
-        See `coverage.report()` for arguments.
-
-        """
-        self.report_files(self.annotate_file, morfs, directory)
-
-    def annotate_file(self, fr, analysis):
-        """Annotate a single file.
-
-        `fr` is the FileReporter for the file to annotate.
-
-        """
-        statements = sorted(analysis.statements)
-        missing = sorted(analysis.missing)
-        excluded = sorted(analysis.excluded)
-
-        if self.directory:
-            dest_file = os.path.join(self.directory, flat_rootname(fr.relative_filename()))
-            if dest_file.endswith("_py"):
-                dest_file = dest_file[:-3] + ".py"
-            dest_file += ",cover"
-        else:
-            dest_file = fr.filename + ",cover"
-
-        with io.open(dest_file, 'w', encoding='utf8') as dest:
-            i = 0
-            j = 0
-            covered = True
-            source = fr.source()
-            for lineno, line in enumerate(source.splitlines(True), start=1):
-                while i < len(statements) and statements[i] < lineno:
-                    i += 1
-                while j < len(missing) and missing[j] < lineno:
-                    j += 1
-                if i < len(statements) and statements[i] == lineno:
-                    covered = j >= len(missing) or missing[j] > lineno
-                if self.blank_re.match(line):
-                    dest.write(u'  ')
-                elif self.else_re.match(line):
-                    # Special logic for lines containing only 'else:'.
-                    if i >= len(statements) and j >= len(missing):
-                        dest.write(u'! ')
-                    elif i >= len(statements) or j >= len(missing):
-                        dest.write(u'> ')
-                    elif statements[i] == missing[j]:
-                        dest.write(u'! ')
-                    else:
-                        dest.write(u'> ')
-                elif lineno in excluded:
-                    dest.write(u'- ')
-                elif covered:
-                    dest.write(u'> ')
-                else:
-                    dest.write(u'! ')
-
-                dest.write(line)
-
-#
-# eflag: FileType = Python2
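
A small, hedged sketch of driving the annotation shown above through the public API instead of instantiating AnnotateReporter directly; the output directory name is arbitrary.

    # Produces "<module>,cover" copies marked with '>', '!' and '-' as
    # described in the AnnotateReporter docstring above.
    import coverage

    cov = coverage.Coverage()
    cov.start()
    result = max(1, 2, 3)                  # placeholder measured code
    cov.stop()
    cov.annotate(directory="annotated")    # arbitrary output directory
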
--- a/DebugClients/Python/coverage/backunittest.py	Fri Sep 02 19:08:02 2016 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,45 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
-
-"""Implementations of unittest features from the future."""
-
-# Use unittest2 if it's available, otherwise unittest.  This gives us
-# back-ported features for 2.6.
-try:
-    import unittest2 as unittest
-except ImportError:
-    import unittest
-
-
-def unittest_has(method):
-    """Does `unittest.TestCase` have `method` defined?"""
-    return hasattr(unittest.TestCase, method)
-
-
-class TestCase(unittest.TestCase):
-    """Just like unittest.TestCase, but with assert methods added.
-
-    Designed to be compatible with 3.1 unittest.  Methods are only defined if
-    `unittest` doesn't have them.
-
-    """
-    # pylint: disable=missing-docstring
-
-    # Many Pythons have this method defined.  But PyPy3 has a bug with it
-    # somehow (https://bitbucket.org/pypy/pypy/issues/2092), so always use our
-    # own implementation that works everywhere, at least for the ways we're
-    # calling it.
-    def assertCountEqual(self, s1, s2):
-        """Assert these have the same elements, regardless of order."""
-        self.assertEqual(sorted(s1), sorted(s2))
-
-    if not unittest_has('assertRaisesRegex'):
-        def assertRaisesRegex(self, *args, **kwargs):
-            return self.assertRaisesRegexp(*args, **kwargs)
-
-    if not unittest_has('assertRegex'):
-        def assertRegex(self, *args, **kwargs):
-            return self.assertRegexpMatches(*args, **kwargs)
-
-#
-# eflag: FileType = Python2
--- a/DebugClients/Python/coverage/backward.py	Fri Sep 02 19:08:02 2016 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,175 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
-
-"""Add things to old Pythons so I can pretend they are newer."""
-
-# This file does lots of tricky stuff, so disable a bunch of pylint warnings.
-# pylint: disable=redefined-builtin
-# pylint: disable=unused-import
-# pylint: disable=no-name-in-module
-
-import sys
-
-from coverage import env
-
-
-# Pythons 2 and 3 differ on where to get StringIO.
-try:
-    from cStringIO import StringIO
-except ImportError:
-    from io import StringIO
-
-# In py3, ConfigParser was renamed to the more-standard configparser
-try:
-    import configparser
-except ImportError:
-    import ConfigParser as configparser
-
-# What's a string called?
-try:
-    string_class = basestring
-except NameError:
-    string_class = str
-
-# What's a Unicode string called?
-try:
-    unicode_class = unicode
-except NameError:
-    unicode_class = str
-
-# Where do pickles come from?
-try:
-    import cPickle as pickle
-except ImportError:
-    import pickle
-
-# range or xrange?
-try:
-    range = xrange
-except NameError:
-    range = range
-
-# shlex.quote is new, but there's an undocumented implementation in "pipes",
-# who knew!?
-try:
-    from shlex import quote as shlex_quote
-except ImportError:
-    # Useful function, available under a different (undocumented) name
-    # in Python versions earlier than 3.3.
-    from pipes import quote as shlex_quote
-
-# A function to iterate listlessly over a dict's items.
-try:
-    {}.iteritems
-except AttributeError:
-    def iitems(d):
-        """Produce the items from dict `d`."""
-        return d.items()
-else:
-    def iitems(d):
-        """Produce the items from dict `d`."""
-        return d.iteritems()
-
-# Getting the `next` function from an iterator is different in 2 and 3.
-try:
-    iter([]).next
-except AttributeError:
-    def iternext(seq):
-        """Get the `next` function for iterating over `seq`."""
-        return iter(seq).__next__
-else:
-    def iternext(seq):
-        """Get the `next` function for iterating over `seq`."""
-        return iter(seq).next
-
-# Python 3.x is picky about bytes and strings, so provide methods to
-# get them right, and make them no-ops in 2.x
-if env.PY3:
-    def to_bytes(s):
-        """Convert string `s` to bytes."""
-        return s.encode('utf8')
-
-    def binary_bytes(byte_values):
-        """Produce a byte string with the ints from `byte_values`."""
-        return bytes(byte_values)
-
-    def bytes_to_ints(bytes_value):
-        """Turn a bytes object into a sequence of ints."""
-        # In Python 3, iterating bytes gives ints.
-        return bytes_value
-
-else:
-    def to_bytes(s):
-        """Convert string `s` to bytes (no-op in 2.x)."""
-        return s
-
-    def binary_bytes(byte_values):
-        """Produce a byte string with the ints from `byte_values`."""
-        return "".join(chr(b) for b in byte_values)
-
-    def bytes_to_ints(bytes_value):
-        """Turn a bytes object into a sequence of ints."""
-        for byte in bytes_value:
-            yield ord(byte)
-
-
-try:
-    # In Python 2.x, the builtins were in __builtin__
-    BUILTINS = sys.modules['__builtin__']
-except KeyError:
-    # In Python 3.x, they're in builtins
-    BUILTINS = sys.modules['builtins']
-
-
-# imp was deprecated in Python 3.3
-try:
-    import importlib
-    import importlib.util
-    imp = None
-except ImportError:
-    importlib = None
-
-# We only want to use importlib if it has everything we need.
-try:
-    importlib_util_find_spec = importlib.util.find_spec
-except Exception:
-    import imp
-    importlib_util_find_spec = None
-
-# What is the .pyc magic number for this version of Python?
-try:
-    PYC_MAGIC_NUMBER = importlib.util.MAGIC_NUMBER
-except AttributeError:
-    PYC_MAGIC_NUMBER = imp.get_magic()
-
-
-def import_local_file(modname, modfile=None):
-    """Import a local file as a module.
-
-    Opens a file in the current directory named `modname`.py, imports it
-    as `modname`, and returns the module object.  `modfile` is the file to
-    import if it isn't in the current directory.
-
-    """
-    try:
-        from importlib.machinery import SourceFileLoader
-    except ImportError:
-        SourceFileLoader = None
-
-    if modfile is None:
-        modfile = modname + '.py'
-    if SourceFileLoader:
-        mod = SourceFileLoader(modname, modfile).load_module()
-    else:
-        for suff in imp.get_suffixes():                 # pragma: part covered
-            if suff[0] == '.py':
-                break
-
-        with open(modfile, 'r') as f:
-            # pylint: disable=undefined-loop-variable
-            mod = imp.load_module(modname, f, modfile, suff)
-
-    return mod
-
-#
-# eflag: FileType = Python2
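
The file above is a collection of try/except feature probes; a short, hedged sketch of how the rest of the tracer consumes those shims instead of branching on sys.version_info at every call site (assuming the module above is importable as coverage.backward).

    # Hedged usage sketch for the compatibility shims defined above.
    from coverage.backward import iitems, string_class, to_bytes

    data = {"a.py": 3, "b.py": 5}
    for filename, lines in iitems(data):       # works on Python 2 and 3
        assert isinstance(filename, string_class)
        payload = to_bytes(filename)           # bytes on both major versions
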
--- a/DebugClients/Python/coverage/bytecode.py	Fri Sep 02 19:08:02 2016 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,25 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
-
-"""Bytecode manipulation for coverage.py"""
-
-import types
-
-
-class CodeObjects(object):
-    """Iterate over all the code objects in `code`."""
-    def __init__(self, code):
-        self.stack = [code]
-
-    def __iter__(self):
-        while self.stack:
-            # We're going to return the code object on the stack, but first
-            # push its children for later returning.
-            code = self.stack.pop()
-            for c in code.co_consts:
-                if isinstance(c, types.CodeType):
-                    self.stack.append(c)
-            yield code
-
-#
-# eflag: FileType = Python2
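
A brief usage sketch for the traversal the CodeObjects iterator above performs: compile a throwaway snippet and walk its nested code objects via co_consts.

    # Hedged sketch: walking nested code objects the way CodeObjects.__iter__ does.
    import types

    source = "def outer():\n    def inner():\n        pass\n"
    top = compile(source, "<demo>", "exec")

    stack = [top]                          # same stack-based traversal as above
    while stack:
        code = stack.pop()
        stack.extend(c for c in code.co_consts if isinstance(c, types.CodeType))
        print(code.co_name)                # "<module>", "outer", "inner"
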
--- a/DebugClients/Python/coverage/cmdline.py	Fri Sep 02 19:08:02 2016 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,766 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
-
-"""Command-line support for coverage.py."""
-
-import glob
-import optparse
-import os.path
-import sys
-import textwrap
-import traceback
-
-from coverage import env
-from coverage.collector import CTracer
-from coverage.execfile import run_python_file, run_python_module
-from coverage.misc import CoverageException, ExceptionDuringRun, NoSource
-from coverage.debug import info_formatter, info_header
-
-
-class Opts(object):
-    """A namespace class for individual options we'll build parsers from."""
-
-    append = optparse.make_option(
-        '-a', '--append', action='store_true',
-        help="Append coverage data to .coverage, otherwise it is started clean with each run.",
-    )
-    branch = optparse.make_option(
-        '', '--branch', action='store_true',
-        help="Measure branch coverage in addition to statement coverage.",
-    )
-    CONCURRENCY_CHOICES = [
-        "thread", "gevent", "greenlet", "eventlet", "multiprocessing",
-    ]
-    concurrency = optparse.make_option(
-        '', '--concurrency', action='store', metavar="LIB",
-        choices=CONCURRENCY_CHOICES,
-        help=(
-            "Properly measure code using a concurrency library. "
-            "Valid values are: %s."
-        ) % ", ".join(CONCURRENCY_CHOICES),
-    )
-    debug = optparse.make_option(
-        '', '--debug', action='store', metavar="OPTS",
-        help="Debug options, separated by commas",
-    )
-    directory = optparse.make_option(
-        '-d', '--directory', action='store', metavar="DIR",
-        help="Write the output files to DIR.",
-    )
-    fail_under = optparse.make_option(
-        '', '--fail-under', action='store', metavar="MIN", type="int",
-        help="Exit with a status of 2 if the total coverage is less than MIN.",
-    )
-    help = optparse.make_option(
-        '-h', '--help', action='store_true',
-        help="Get help on this command.",
-    )
-    ignore_errors = optparse.make_option(
-        '-i', '--ignore-errors', action='store_true',
-        help="Ignore errors while reading source files.",
-    )
-    include = optparse.make_option(
-        '', '--include', action='store',
-        metavar="PAT1,PAT2,...",
-        help=(
-            "Include only files whose paths match one of these patterns. "
-            "Accepts shell-style wildcards, which must be quoted."
-        ),
-    )
-    pylib = optparse.make_option(
-        '-L', '--pylib', action='store_true',
-        help=(
-            "Measure coverage even inside the Python installed library, "
-            "which isn't done by default."
-        ),
-    )
-    show_missing = optparse.make_option(
-        '-m', '--show-missing', action='store_true',
-        help="Show line numbers of statements in each module that weren't executed.",
-    )
-    skip_covered = optparse.make_option(
-        '--skip-covered', action='store_true',
-        help="Skip files with 100% coverage.",
-    )
-    omit = optparse.make_option(
-        '', '--omit', action='store',
-        metavar="PAT1,PAT2,...",
-        help=(
-            "Omit files whose paths match one of these patterns. "
-            "Accepts shell-style wildcards, which must be quoted."
-        ),
-    )
-    output_xml = optparse.make_option(
-        '-o', '', action='store', dest="outfile",
-        metavar="OUTFILE",
-        help="Write the XML report to this file. Defaults to 'coverage.xml'",
-    )
-    parallel_mode = optparse.make_option(
-        '-p', '--parallel-mode', action='store_true',
-        help=(
-            "Append the machine name, process id and random number to the "
-            ".coverage data file name to simplify collecting data from "
-            "many processes."
-        ),
-    )
-    module = optparse.make_option(
-        '-m', '--module', action='store_true',
-        help=(
-            "<pyfile> is an importable Python module, not a script path, "
-            "to be run as 'python -m' would run it."
-        ),
-    )
-    rcfile = optparse.make_option(
-        '', '--rcfile', action='store',
-        help="Specify configuration file.  Defaults to '.coveragerc'",
-    )
-    source = optparse.make_option(
-        '', '--source', action='store', metavar="SRC1,SRC2,...",
-        help="A list of packages or directories of code to be measured.",
-    )
-    timid = optparse.make_option(
-        '', '--timid', action='store_true',
-        help=(
-            "Use a simpler but slower trace method.  Try this if you get "
-            "seemingly impossible results!"
-        ),
-    )
-    title = optparse.make_option(
-        '', '--title', action='store', metavar="TITLE",
-        help="A text string to use as the title on the HTML.",
-    )
-    version = optparse.make_option(
-        '', '--version', action='store_true',
-        help="Display version information and exit.",
-    )
-
-
-class CoverageOptionParser(optparse.OptionParser, object):
-    """Base OptionParser for coverage.py.
-
-    Problems don't exit the program.
-    Defaults are initialized for all options.
-
-    """
-
-    def __init__(self, *args, **kwargs):
-        super(CoverageOptionParser, self).__init__(
-            add_help_option=False, *args, **kwargs
-            )
-        self.set_defaults(
-            action=None,
-            append=None,
-            branch=None,
-            concurrency=None,
-            debug=None,
-            directory=None,
-            fail_under=None,
-            help=None,
-            ignore_errors=None,
-            include=None,
-            module=None,
-            omit=None,
-            parallel_mode=None,
-            pylib=None,
-            rcfile=True,
-            show_missing=None,
-            skip_covered=None,
-            source=None,
-            timid=None,
-            title=None,
-            version=None,
-            )
-
-        self.disable_interspersed_args()
-        self.help_fn = self.help_noop
-
-    def help_noop(self, error=None, topic=None, parser=None):
-        """No-op help function."""
-        pass
-
-    class OptionParserError(Exception):
-        """Used to stop the optparse error handler ending the process."""
-        pass
-
-    def parse_args_ok(self, args=None, options=None):
-        """Call optparse.parse_args, but return a triple:
-
-        (ok, options, args)
-
-        """
-        try:
-            options, args = \
-                super(CoverageOptionParser, self).parse_args(args, options)
-        except self.OptionParserError:
-            return False, None, None
-        return True, options, args
-
-    def error(self, msg):
-        """Override optparse.error so sys.exit doesn't get called."""
-        self.help_fn(msg)
-        raise self.OptionParserError
-
-
-class GlobalOptionParser(CoverageOptionParser):
-    """Command-line parser for coverage.py global option arguments."""
-
-    def __init__(self):
-        super(GlobalOptionParser, self).__init__()
-
-        self.add_options([
-            Opts.help,
-            Opts.version,
-        ])
-
-
-class CmdOptionParser(CoverageOptionParser):
-    """Parse one of the new-style commands for coverage.py."""
-
-    def __init__(self, action, options=None, defaults=None, usage=None, description=None):
-        """Create an OptionParser for a coverage.py command.
-
-        `action` is the slug to put into `options.action`.
-        `options` is a list of Option's for the command.
-        `defaults` is a dict of default value for options.
-        `usage` is the usage string to display in help.
-        `description` is the description of the command, for the help text.
-
-        """
-        if usage:
-            usage = "%prog " + usage
-        super(CmdOptionParser, self).__init__(
-            usage=usage,
-            description=description,
-        )
-        self.set_defaults(action=action, **(defaults or {}))
-        if options:
-            self.add_options(options)
-        self.cmd = action
-
-    def __eq__(self, other):
-        # A convenience equality, so that I can put strings in unit test
-        # results, and they will compare equal to objects.
-        return (other == "<CmdOptionParser:%s>" % self.cmd)
-
-    def get_prog_name(self):
-        """Override of an undocumented function in optparse.OptionParser."""
-        program_name = super(CmdOptionParser, self).get_prog_name()
-
-        # Include the sub-command for this parser as part of the command.
-        return "%(command)s %(subcommand)s" % {'command': program_name, 'subcommand': self.cmd}
-
-
-GLOBAL_ARGS = [
-    Opts.debug,
-    Opts.help,
-    Opts.rcfile,
-    ]
-
-CMDS = {
-    'annotate': CmdOptionParser(
-        "annotate",
-        [
-            Opts.directory,
-            Opts.ignore_errors,
-            Opts.include,
-            Opts.omit,
-            ] + GLOBAL_ARGS,
-        usage="[options] [modules]",
-        description=(
-            "Make annotated copies of the given files, marking statements that are executed "
-            "with > and statements that are missed with !."
-        ),
-    ),
-
-    'combine': CmdOptionParser(
-        "combine",
-        GLOBAL_ARGS,
-        usage="<path1> <path2> ... <pathN>",
-        description=(
-            "Combine data from multiple coverage files collected "
-            "with 'run -p'.  The combined results are written to a single "
-            "file representing the union of the data. The positional "
-            "arguments are data files or directories containing data files. "
-            "If no paths are provided, data files in the default data file's "
-            "directory are combined."
-        ),
-    ),
-
-    'debug': CmdOptionParser(
-        "debug", GLOBAL_ARGS,
-        usage="<topic>",
-        description=(
-            "Display information on the internals of coverage.py, "
-            "for diagnosing problems. "
-            "Topics are 'data' to show a summary of the collected data, "
-            "or 'sys' to show installation information."
-        ),
-    ),
-
-    'erase': CmdOptionParser(
-        "erase", GLOBAL_ARGS,
-        usage=" ",
-        description="Erase previously collected coverage data.",
-    ),
-
-    'help': CmdOptionParser(
-        "help", GLOBAL_ARGS,
-        usage="[command]",
-        description="Describe how to use coverage.py",
-    ),
-
-    'html': CmdOptionParser(
-        "html",
-        [
-            Opts.directory,
-            Opts.fail_under,
-            Opts.ignore_errors,
-            Opts.include,
-            Opts.omit,
-            Opts.title,
-            ] + GLOBAL_ARGS,
-        usage="[options] [modules]",
-        description=(
-            "Create an HTML report of the coverage of the files.  "
-            "Each file gets its own page, with the source decorated to show "
-            "executed, excluded, and missed lines."
-        ),
-    ),
-
-    'report': CmdOptionParser(
-        "report",
-        [
-            Opts.fail_under,
-            Opts.ignore_errors,
-            Opts.include,
-            Opts.omit,
-            Opts.show_missing,
-            Opts.skip_covered,
-            ] + GLOBAL_ARGS,
-        usage="[options] [modules]",
-        description="Report coverage statistics on modules."
-    ),
-
-    'run': CmdOptionParser(
-        "run",
-        [
-            Opts.append,
-            Opts.branch,
-            Opts.concurrency,
-            Opts.include,
-            Opts.module,
-            Opts.omit,
-            Opts.pylib,
-            Opts.parallel_mode,
-            Opts.source,
-            Opts.timid,
-            ] + GLOBAL_ARGS,
-        usage="[options] <pyfile> [program options]",
-        description="Run a Python program, measuring code execution."
-    ),
-
-    'xml': CmdOptionParser(
-        "xml",
-        [
-            Opts.fail_under,
-            Opts.ignore_errors,
-            Opts.include,
-            Opts.omit,
-            Opts.output_xml,
-            ] + GLOBAL_ARGS,
-        usage="[options] [modules]",
-        description="Generate an XML report of coverage results."
-    ),
-}
-
-
-OK, ERR, FAIL_UNDER = 0, 1, 2
-
-
-class CoverageScript(object):
-    """The command-line interface to coverage.py."""
-
-    def __init__(self, _covpkg=None, _run_python_file=None,
-                 _run_python_module=None, _help_fn=None, _path_exists=None):
-        # _covpkg is for dependency injection, so we can test this code.
-        if _covpkg:
-            self.covpkg = _covpkg
-        else:
-            import coverage
-            self.covpkg = coverage
-
-        # For dependency injection:
-        self.run_python_file = _run_python_file or run_python_file
-        self.run_python_module = _run_python_module or run_python_module
-        self.help_fn = _help_fn or self.help
-        self.path_exists = _path_exists or os.path.exists
-        self.global_option = False
-
-        self.coverage = None
-
-        self.program_name = os.path.basename(sys.argv[0])
-        if env.WINDOWS:
-            # entry_points={'console_scripts':...} on Windows makes files
-            # called coverage.exe, coverage3.exe, and coverage-3.5.exe. These
-            # invoke coverage-script.py, coverage3-script.py, and
-            # coverage-3.5-script.py.  argv[0] is the .py file, but we want to
-            # get back to the original form.
-            auto_suffix = "-script.py"
-            if self.program_name.endswith(auto_suffix):
-                self.program_name = self.program_name[:-len(auto_suffix)]
-
-    def command_line(self, argv):
-        """The bulk of the command line interface to coverage.py.
-
-        `argv` is the argument list to process.
-
-        Returns 0 if all is well, 1 if something went wrong.
-
-        """
-        # Collect the command-line options.
-        if not argv:
-            self.help_fn(topic='minimum_help')
-            return OK
-
-        # The command syntax we parse depends on the first argument.  Global
-        # switch syntax always starts with an option.
-        self.global_option = argv[0].startswith('-')
-        if self.global_option:
-            parser = GlobalOptionParser()
-        else:
-            parser = CMDS.get(argv[0])
-            if not parser:
-                self.help_fn("Unknown command: '%s'" % argv[0])
-                return ERR
-            argv = argv[1:]
-
-        parser.help_fn = self.help_fn
-        ok, options, args = parser.parse_args_ok(argv)
-        if not ok:
-            return ERR
-
-        # Handle help and version.
-        if self.do_help(options, args, parser):
-            return OK
-
-        # Check for conflicts and problems in the options.
-        if not self.args_ok(options, args):
-            return ERR
-
-        # We need to be able to import from the current directory, because
-        # plugins may try to, for example, to read Django settings.
-        sys.path[0] = ''
-
-        # Listify the list options.
-        source = unshell_list(options.source)
-        omit = unshell_list(options.omit)
-        include = unshell_list(options.include)
-        debug = unshell_list(options.debug)
-
-        # Do something.
-        self.coverage = self.covpkg.coverage(
-            data_suffix=options.parallel_mode,
-            cover_pylib=options.pylib,
-            timid=options.timid,
-            branch=options.branch,
-            config_file=options.rcfile,
-            source=source,
-            omit=omit,
-            include=include,
-            debug=debug,
-            concurrency=options.concurrency,
-            )
-
-        if options.action == "debug":
-            return self.do_debug(args)
-
-        elif options.action == "erase":
-            self.coverage.erase()
-            return OK
-
-        elif options.action == "run":
-            return self.do_run(options, args)
-
-        elif options.action == "combine":
-            self.coverage.load()
-            data_dirs = args or None
-            self.coverage.combine(data_dirs)
-            self.coverage.save()
-            return OK
-
-        # Remaining actions are reporting, with some common options.
-        report_args = dict(
-            morfs=unglob_args(args),
-            ignore_errors=options.ignore_errors,
-            omit=omit,
-            include=include,
-            )
-
-        self.coverage.load()
-
-        total = None
-        if options.action == "report":
-            total = self.coverage.report(
-                show_missing=options.show_missing,
-                skip_covered=options.skip_covered, **report_args)
-        elif options.action == "annotate":
-            self.coverage.annotate(
-                directory=options.directory, **report_args)
-        elif options.action == "html":
-            total = self.coverage.html_report(
-                directory=options.directory, title=options.title,
-                **report_args)
-        elif options.action == "xml":
-            outfile = options.outfile
-            total = self.coverage.xml_report(outfile=outfile, **report_args)
-
-        if total is not None:
-            # Apply the command line fail-under options, and then use the config
-            # value, so we can get fail_under from the config file.
-            if options.fail_under is not None:
-                self.coverage.set_option("report:fail_under", options.fail_under)
-
-            if self.coverage.get_option("report:fail_under"):
-
-                # Total needs to be rounded, but be careful of 0 and 100.
-                if 0 < total < 1:
-                    total = 1
-                elif 99 < total < 100:
-                    total = 99
-                else:
-                    total = round(total)
-
-                if total >= self.coverage.get_option("report:fail_under"):
-                    return OK
-                else:
-                    return FAIL_UNDER
-
-        return OK
-
-    def help(self, error=None, topic=None, parser=None):
-        """Display an error message, or the named topic."""
-        assert error or topic or parser
-        if error:
-            print(error)
-            print("Use '%s help' for help." % (self.program_name,))
-        elif parser:
-            print(parser.format_help().strip())
-        else:
-            help_params = dict(self.covpkg.__dict__)
-            help_params['program_name'] = self.program_name
-            if CTracer is not None:
-                help_params['extension_modifier'] = 'with C extension'
-            else:
-                help_params['extension_modifier'] = 'without C extension'
-            help_msg = textwrap.dedent(HELP_TOPICS.get(topic, '')).strip()
-            if help_msg:
-                print(help_msg.format(**help_params))
-            else:
-                print("Don't know topic %r" % topic)
-
-    def do_help(self, options, args, parser):
-        """Deal with help requests.
-
-        Return True if it handled the request, False if not.
-
-        """
-        # Handle help.
-        if options.help:
-            if self.global_option:
-                self.help_fn(topic='help')
-            else:
-                self.help_fn(parser=parser)
-            return True
-
-        if options.action == "help":
-            if args:
-                for a in args:
-                    parser = CMDS.get(a)
-                    if parser:
-                        self.help_fn(parser=parser)
-                    else:
-                        self.help_fn(topic=a)
-            else:
-                self.help_fn(topic='help')
-            return True
-
-        # Handle version.
-        if options.version:
-            self.help_fn(topic='version')
-            return True
-
-        return False
-
-    def args_ok(self, options, args):
-        """Check for conflicts and problems in the options.
-
-        Returns True if everything is OK, or False if not.
-
-        """
-        if options.action == "run" and not args:
-            self.help_fn("Nothing to do.")
-            return False
-
-        return True
-
-    def do_run(self, options, args):
-        """Implementation of 'coverage run'."""
-
-        if options.append and self.coverage.get_option("run:parallel"):
-            self.help_fn("Can't append to data files in parallel mode.")
-            return ERR
-
-        if not self.coverage.get_option("run:parallel"):
-            if not options.append:
-                self.coverage.erase()
-
-        # Run the script.
-        self.coverage.start()
-        code_ran = True
-        try:
-            if options.module:
-                self.run_python_module(args[0], args)
-            else:
-                filename = args[0]
-                self.run_python_file(filename, args)
-        except NoSource:
-            code_ran = False
-            raise
-        finally:
-            self.coverage.stop()
-            if code_ran:
-                if options.append:
-                    data_file = self.coverage.get_option("run:data_file")
-                    if self.path_exists(data_file):
-                        self.coverage.combine(data_paths=[data_file])
-                self.coverage.save()
-
-        return OK
-
-    def do_debug(self, args):
-        """Implementation of 'coverage debug'."""
-
-        if not args:
-            self.help_fn("What information would you like: data, sys?")
-            return ERR
-
-        for info in args:
-            if info == 'sys':
-                sys_info = self.coverage.sys_info()
-                print(info_header("sys"))
-                for line in info_formatter(sys_info):
-                    print(" %s" % line)
-            elif info == 'data':
-                self.coverage.load()
-                data = self.coverage.data
-                print(info_header("data"))
-                print("path: %s" % self.coverage.data_files.filename)
-                if data:
-                    print("has_arcs: %r" % data.has_arcs())
-                    summary = data.line_counts(fullpath=True)
-                    filenames = sorted(summary.keys())
-                    print("\n%d files:" % len(filenames))
-                    for f in filenames:
-                        line = "%s: %d lines" % (f, summary[f])
-                        plugin = data.file_tracer(f)
-                        if plugin:
-                            line += " [%s]" % plugin
-                        print(line)
-                else:
-                    print("No data collected")
-            else:
-                self.help_fn("Don't know what you mean by %r" % info)
-                return ERR
-
-        return OK
-
-
-def unshell_list(s):
-    """Turn a command-line argument into a list."""
-    if not s:
-        return None
-    if env.WINDOWS:
-        # When running coverage.py as coverage.exe, some of the behavior
-        # of the shell is emulated: wildcards are expanded into a list of
-        # file names.  So you have to single-quote patterns on the command
-        # line, but (not) helpfully, the single quotes are included in the
-        # argument, so we have to strip them off here.
-        s = s.strip("'")
-    return s.split(',')
-
-
-def unglob_args(args):
-    """Interpret shell wildcards for platforms that need it."""
-    if env.WINDOWS:
-        globbed = []
-        for arg in args:
-            if '?' in arg or '*' in arg:
-                globbed.extend(glob.glob(arg))
-            else:
-                globbed.append(arg)
-        args = globbed
-    return args
-
-
-HELP_TOPICS = {
-    'help': """\
-        Coverage.py, version {__version__} {extension_modifier}
-        Measure, collect, and report on code coverage in Python programs.
-
-        usage: {program_name} <command> [options] [args]
-
-        Commands:
-            annotate    Annotate source files with execution information.
-            combine     Combine a number of data files.
-            erase       Erase previously collected coverage data.
-            help        Get help on using coverage.py.
-            html        Create an HTML report.
-            report      Report coverage stats on modules.
-            run         Run a Python program and measure code execution.
-            xml         Create an XML report of coverage results.
-
-        Use "{program_name} help <command>" for detailed help on any command.
-        For full documentation, see {__url__}
-    """,
-
-    'minimum_help': """\
-        Code coverage for Python.  Use '{program_name} help' for help.
-    """,
-
-    'version': """\
-        Coverage.py, version {__version__} {extension_modifier}
-        Documentation at {__url__}
-    """,
-}
-
-
-def main(argv=None):
-    """The main entry point to coverage.py.
-
-    This is installed as the script entry point.
-
-    """
-    if argv is None:
-        argv = sys.argv[1:]
-    try:
-        status = CoverageScript().command_line(argv)
-    except ExceptionDuringRun as err:
-        # An exception was caught while running the product code.  The
-        # sys.exc_info() return tuple is packed into an ExceptionDuringRun
-        # exception.
-        traceback.print_exception(*err.args)
-        status = ERR
-    except CoverageException as err:
-        # A controlled error inside coverage.py: print the message to the user.
-        print(err)
-        status = ERR
-    except SystemExit as err:
-        # The user called `sys.exit()`.  Exit with their argument, if any.
-        if err.args:
-            status = err.args[0]
-        else:
-            status = None
-    return status
-
-#
-# eflag: FileType = Python2
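
For reference, a hedged sketch of calling the main() entry point above programmatically with an explicit argv, which is how the console script and __main__.py drive it; the chosen sub-command and threshold are arbitrary.

    # Hedged sketch: invoke the command-line front end without a subprocess.
    import sys
    from coverage.cmdline import main

    status = main(["report", "--fail-under", "80"])  # OK=0, ERR=1, FAIL_UNDER=2
    sys.exit(status)
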
--- a/DebugClients/Python/coverage/collector.py	Fri Sep 02 19:08:02 2016 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,364 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
-
-"""Raw data collector for coverage.py."""
-
-import os
-import sys
-
-from coverage import env
-from coverage.backward import iitems
-from coverage.files import abs_file
-from coverage.misc import CoverageException, isolate_module
-from coverage.pytracer import PyTracer
-
-os = isolate_module(os)
-
-
-try:
-    # Use the C extension code when we can, for speed.
-    from coverage.tracer import CTracer, CFileDisposition   # pylint: disable=no-name-in-module
-except ImportError:
-    # Couldn't import the C extension, maybe it isn't built.
-    if os.getenv('COVERAGE_TEST_TRACER') == 'c':
-        # During testing, we use the COVERAGE_TEST_TRACER environment variable
-        # to indicate that we've fiddled with the environment to test this
-        # fallback code.  If we thought we had a C tracer, but couldn't import
-        # it, then exit quickly and clearly instead of dribbling confusing
-        # errors. I'm using sys.exit here instead of an exception because an
-        # exception here causes all sorts of other noise in unittest.
-        sys.stderr.write("*** COVERAGE_TEST_TRACER is 'c' but can't import CTracer!\n")
-        sys.exit(1)
-    CTracer = None
-
-
-class FileDisposition(object):
-    """A simple value type for recording what to do with a file."""
-    pass
-
-
-def should_start_context(frame):
-    """Who-Tests-What hack: Determine whether this frame begins a new who-context."""
-    fn_name = frame.f_code.co_name
-    if fn_name.startswith("test"):
-        return fn_name
-
-
-class Collector(object):
-    """Collects trace data.
-
-    Creates a Tracer object for each thread, since they track stack
-    information.  Each Tracer points to the same shared data, contributing
-    traced data points.
-
-    When the Collector is started, it creates a Tracer for the current thread,
-    and installs a function to create Tracers for each new thread started.
-    When the Collector is stopped, all active Tracers are stopped.
-
-    Threads started while the Collector is stopped will never have Tracers
-    associated with them.
-
-    """
-
-    # The stack of active Collectors.  Collectors are added here when started,
-    # and popped when stopped.  Collectors on the stack are paused when not
-    # the top, and resumed when they become the top again.
-    _collectors = []
-
-    def __init__(self, should_trace, check_include, timid, branch, warn, concurrency):
-        """Create a collector.
-
-        `should_trace` is a function, taking a file name, and returning a
-        `coverage.FileDisposition object`.
-
-        `check_include` is a function taking a file name and a frame. It returns
-        a boolean: True if the file should be traced, False if not.
-
-        If `timid` is true, then a slower simpler trace function will be
-        used.  This is important for some environments where manipulation of
-        tracing functions make the faster more sophisticated trace function not
-        operate properly.
-
-        If `branch` is true, then branches will be measured.  This involves
-        collecting data on which statements followed each other (arcs).  Use
-        `get_arc_data` to get the arc data.
-
-        `warn` is a warning function, taking a single string message argument,
-        to be used if a warning needs to be issued.
-
-        `concurrency` is a string indicating the concurrency library in use.
-        Valid values are "greenlet", "eventlet", "gevent", or "thread" (the
-        default).
-
-        """
-        self.should_trace = should_trace
-        self.check_include = check_include
-        self.warn = warn
-        self.branch = branch
-        self.threading = None
-        self.concurrency = concurrency
-
-        self.concur_id_func = None
-
-        try:
-            if concurrency == "greenlet":
-                import greenlet
-                self.concur_id_func = greenlet.getcurrent
-            elif concurrency == "eventlet":
-                import eventlet.greenthread     # pylint: disable=import-error,useless-suppression
-                self.concur_id_func = eventlet.greenthread.getcurrent
-            elif concurrency == "gevent":
-                import gevent                   # pylint: disable=import-error,useless-suppression
-                self.concur_id_func = gevent.getcurrent
-            elif concurrency == "thread" or not concurrency:
-                # It's important to import threading only if we need it.  If
-                # it's imported early, and the program being measured uses
-                # gevent, then gevent's monkey-patching won't work properly.
-                import threading
-                self.threading = threading
-            else:
-                raise CoverageException("Don't understand concurrency=%s" % concurrency)
-        except ImportError:
-            raise CoverageException(
-                "Couldn't trace with concurrency=%s, the module isn't installed." % concurrency
-            )
-
-        # Who-Tests-What is just a hack at the moment, so turn it on with an
-        # environment variable.
-        self.wtw = int(os.getenv('COVERAGE_WTW', 0))
-
-        self.reset()
-
-        if timid:
-            # Being timid: use the simple Python trace function.
-            self._trace_class = PyTracer
-        else:
-            # Being fast: use the C Tracer if it is available, else the Python
-            # trace function.
-            self._trace_class = CTracer or PyTracer
-
-        if self._trace_class is CTracer:
-            self.file_disposition_class = CFileDisposition
-            self.supports_plugins = True
-        else:
-            self.file_disposition_class = FileDisposition
-            self.supports_plugins = False
-
-    def __repr__(self):
-        return "<Collector at 0x%x: %s>" % (id(self), self.tracer_name())
-
-    def tracer_name(self):
-        """Return the class name of the tracer we're using."""
-        return self._trace_class.__name__
-
-    def reset(self):
-        """Clear collected data, and prepare to collect more."""
-        # A dictionary mapping file names to dicts with line number keys (if not
-        # branch coverage), or mapping file names to dicts with line number
-        # pairs as keys (if branch coverage).
-        self.data = {}
-
-        # A dict mapping contexts to data dictionaries.
-        self.contexts = {}
-        self.contexts[None] = self.data
-
-        # A dictionary mapping file names to file tracer plugin names that will
-        # handle them.
-        self.file_tracers = {}
-
-        # The .should_trace_cache attribute is a cache from file names to
-        # coverage.FileDisposition objects, or None.  When a file is first
-        # considered for tracing, a FileDisposition is obtained from
-        # Coverage.should_trace.  Its .trace attribute indicates whether the
-        # file should be traced or not.  If it should be, a plugin with dynamic
-        # file names can decide not to trace it based on the dynamic file name
-        # being excluded by the inclusion rules, in which case the
-        # FileDisposition will be replaced by None in the cache.
-        if env.PYPY:
-            import __pypy__                     # pylint: disable=import-error
-            # Alex Gaynor said:
-            # should_trace_cache is a strictly growing key: once a key is in
-            # it, it never changes.  Further, the keys used to access it are
-            # generally constant, given sufficient context. That is to say, at
-            # any given point _trace() is called, pypy is able to know the key.
-            # This is because the key is determined by the physical source code
-            # line, and that's invariant with the call site.
-            #
-            # This property of a dict with immutable keys, combined with
-            # call-site-constant keys is a match for PyPy's module dict,
-            # which is optimized for such workloads.
-            #
-            # This gives a 20% benefit on the workload described at
-            # https://bitbucket.org/pypy/pypy/issue/1871/10x-slower-than-cpython-under-coverage
-            self.should_trace_cache = __pypy__.newdict("module")
-        else:
-            self.should_trace_cache = {}
-
-        # Our active Tracers.
-        self.tracers = []
-
-    def _start_tracer(self):
-        """Start a new Tracer object, and store it in self.tracers."""
-        tracer = self._trace_class()
-        tracer.data = self.data
-        tracer.trace_arcs = self.branch
-        tracer.should_trace = self.should_trace
-        tracer.should_trace_cache = self.should_trace_cache
-        tracer.warn = self.warn
-
-        if hasattr(tracer, 'concur_id_func'):
-            tracer.concur_id_func = self.concur_id_func
-        elif self.concur_id_func:
-            raise CoverageException(
-                "Can't support concurrency=%s with %s, only threads are supported" % (
-                    self.concurrency, self.tracer_name(),
-                )
-            )
-
-        if hasattr(tracer, 'file_tracers'):
-            tracer.file_tracers = self.file_tracers
-        if hasattr(tracer, 'threading'):
-            tracer.threading = self.threading
-        if hasattr(tracer, 'check_include'):
-            tracer.check_include = self.check_include
-        if self.wtw:
-            if hasattr(tracer, 'should_start_context'):
-                tracer.should_start_context = should_start_context
-            if hasattr(tracer, 'switch_context'):
-                tracer.switch_context = self.switch_context
-
-        fn = tracer.start()
-        self.tracers.append(tracer)
-
-        return fn
-
-    # The trace function has to be set individually on each thread before
-    # execution begins.  Ironically, the only support the threading module has
-    # for running code before the thread main is the tracing function.  So we
-    # install this as a trace function, and the first time it's called, it does
-    # the real trace installation.
-
-    def _installation_trace(self, frame, event, arg):
-        """Called on new threads, installs the real tracer."""
-        # Remove ourselves as the trace function.
-        sys.settrace(None)
-        # Install the real tracer.
-        fn = self._start_tracer()
-        # Invoke the real trace function with the current event, to be sure
-        # not to lose an event.
-        if fn:
-            fn = fn(frame, event, arg)
-        # Return the new trace function to continue tracing in this scope.
-        return fn
-
-    def start(self):
-        """Start collecting trace information."""
-        if self._collectors:
-            self._collectors[-1].pause()
-
-        # Check to see whether we had a fullcoverage tracer installed. If so,
-        # get the stack frames it stashed away for us.
-        traces0 = []
-        fn0 = sys.gettrace()
-        if fn0:
-            tracer0 = getattr(fn0, '__self__', None)
-            if tracer0:
-                traces0 = getattr(tracer0, 'traces', [])
-
-        try:
-            # Install the tracer on this thread.
-            fn = self._start_tracer()
-        except:
-            if self._collectors:
-                self._collectors[-1].resume()
-            raise
-
-        # If _start_tracer succeeded, then we add ourselves to the global
-        # stack of collectors.
-        self._collectors.append(self)
-
-        # Replay all the events from fullcoverage into the new trace function.
-        for args in traces0:
-            (frame, event, arg), lineno = args
-            try:
-                fn(frame, event, arg, lineno=lineno)
-            except TypeError:
-                raise Exception("fullcoverage must be run with the C trace function.")
-
-        # Install our installation tracer in threading, to jump start other
-        # threads.
-        if self.threading:
-            self.threading.settrace(self._installation_trace)
-
-    def stop(self):
-        """Stop collecting trace information."""
-        assert self._collectors
-        assert self._collectors[-1] is self, (
-            "Expected current collector to be %r, but it's %r" % (self, self._collectors[-1])
-        )
-
-        self.pause()
-        self.tracers = []
-
-        # Remove this Collector from the stack, and resume the one underneath
-        # (if any).
-        self._collectors.pop()
-        if self._collectors:
-            self._collectors[-1].resume()
-
-    def pause(self):
-        """Pause tracing, but be prepared to `resume`."""
-        for tracer in self.tracers:
-            tracer.stop()
-            stats = tracer.get_stats()
-            if stats:
-                print("\nCoverage.py tracer stats:")
-                for k in sorted(stats.keys()):
-                    print("%20s: %s" % (k, stats[k]))
-        if self.threading:
-            self.threading.settrace(None)
-
-    def resume(self):
-        """Resume tracing after a `pause`."""
-        for tracer in self.tracers:
-            tracer.start()
-        if self.threading:
-            self.threading.settrace(self._installation_trace)
-        else:
-            self._start_tracer()
-
-    def switch_context(self, new_context):
-        """Who-Tests-What hack: switch to a new who-context."""
-        # Make a new data dict, or find the existing one, and switch all the
-        # tracers to use it.
-        data = self.contexts.setdefault(new_context, {})
-        for tracer in self.tracers:
-            tracer.data = data
-
-    def save_data(self, covdata):
-        """Save the collected data to a `CoverageData`.
-
-        Also resets the collector.
-
-        """
-        def abs_file_dict(d):
-            """Return a dict like d, but with keys modified by `abs_file`."""
-            return dict((abs_file(k), v) for k, v in iitems(d))
-
-        if self.branch:
-            covdata.add_arcs(abs_file_dict(self.data))
-        else:
-            covdata.add_lines(abs_file_dict(self.data))
-        covdata.add_file_tracers(abs_file_dict(self.file_tracers))
-
-        if self.wtw:
-            # Just a hack, so just hack it.
-            import pprint
-            out_file = "coverage_wtw_{:06}.py".format(os.getpid())
-            with open(out_file, "w") as wtw_out:
-                pprint.pprint(self.contexts, wtw_out)
-
-        self.reset()
-
-#
-# eflag: FileType = Python2
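
The Collector above installs a trace function per thread; a minimal, hedged sketch of the bare sys.settrace mechanism PyTracer builds on, collecting executed line numbers for one function.

    # Hedged sketch: line collection with a plain sys.settrace trace function.
    import sys

    executed = set()

    def tracer(frame, event, arg):
        if event == "line":
            executed.add((frame.f_code.co_filename, frame.f_lineno))
        return tracer                      # keep tracing in nested frames

    def work():
        x = 1
        if x:
            x += 1
        return x

    sys.settrace(tracer)
    try:
        work()
    finally:
        sys.settrace(None)                 # always uninstall, as Collector.pause() does

    print(sorted(executed))
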
--- a/DebugClients/Python/coverage/config.py	Fri Sep 02 19:08:02 2016 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,368 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
-
-"""Config file for coverage.py"""
-
-import collections
-import os
-import re
-import sys
-
-from coverage.backward import configparser, iitems, string_class
-from coverage.misc import CoverageException, isolate_module
-
-os = isolate_module(os)
-
-
-class HandyConfigParser(configparser.RawConfigParser):
-    """Our specialization of ConfigParser."""
-
-    def __init__(self, section_prefix):
-        configparser.RawConfigParser.__init__(self)
-        self.section_prefix = section_prefix
-
-    def read(self, filename):
-        """Read a file name as UTF-8 configuration data."""
-        kwargs = {}
-        if sys.version_info >= (3, 2):
-            kwargs['encoding'] = "utf-8"
-        return configparser.RawConfigParser.read(self, filename, **kwargs)
-
-    def has_option(self, section, option):
-        section = self.section_prefix + section
-        return configparser.RawConfigParser.has_option(self, section, option)
-
-    def has_section(self, section):
-        section = self.section_prefix + section
-        return configparser.RawConfigParser.has_section(self, section)
-
-    def options(self, section):
-        section = self.section_prefix + section
-        return configparser.RawConfigParser.options(self, section)
-
-    def get_section(self, section):
-        """Get the contents of a section, as a dictionary."""
-        d = {}
-        for opt in self.options(section):
-            d[opt] = self.get(section, opt)
-        return d
-
-    def get(self, section, *args, **kwargs):
-        """Get a value, replacing environment variables also.
-
-        The arguments are the same as `RawConfigParser.get`, but in the found
-        value, ``$WORD`` or ``${WORD}`` are replaced by the value of the
-        environment variable ``WORD``.
-
-        Returns the finished value.
-
-        """
-        section = self.section_prefix + section
-        v = configparser.RawConfigParser.get(self, section, *args, **kwargs)
-        def dollar_replace(m):
-            """Called for each $replacement."""
-            # Only one of the groups will have matched, just get its text.
-            word = next(w for w in m.groups() if w is not None)     # pragma: part covered
-            if word == "$":
-                return "$"
-            else:
-                return os.environ.get(word, '')
-
-        dollar_pattern = r"""(?x)   # Use extended regex syntax
-            \$(?:                   # A dollar sign, then
-            (?P<v1>\w+) |           #   a plain word,
-            {(?P<v2>\w+)} |         #   or a {-wrapped word,
-            (?P<char>[$])           #   or a dollar sign.
-            )
-            """
-        v = re.sub(dollar_pattern, dollar_replace, v)
-        return v
-
-    def getlist(self, section, option):
-        """Read a list of strings.
-
-        The value of `section` and `option` is treated as a comma- and newline-
-        separated list of strings.  Each value is stripped of whitespace.
-
-        Returns the list of strings.
-
-        """
-        value_list = self.get(section, option)
-        values = []
-        for value_line in value_list.split('\n'):
-            for value in value_line.split(','):
-                value = value.strip()
-                if value:
-                    values.append(value)
-        return values
-
-    def getregexlist(self, section, option):
-        """Read a list of full-line regexes.
-
-        The value of `section` and `option` is treated as a newline-separated
-        list of regexes.  Each value is stripped of whitespace.
-
-        Returns the list of strings.
-
-        """
-        line_list = self.get(section, option)
-        value_list = []
-        for value in line_list.splitlines():
-            value = value.strip()
-            try:
-                re.compile(value)
-            except re.error as e:
-                raise CoverageException(
-                    "Invalid [%s].%s value %r: %s" % (section, option, value, e)
-                )
-            if value:
-                value_list.append(value)
-        return value_list
-
-
-# The default line exclusion regexes.
-DEFAULT_EXCLUDE = [
-    r'(?i)#\s*pragma[:\s]?\s*no\s*cover',
-]
-
-# The default partial branch regexes, to be modified by the user.
-DEFAULT_PARTIAL = [
-    r'(?i)#\s*pragma[:\s]?\s*no\s*branch',
-]
-
-# The default partial branch regexes, based on Python semantics.
-# These are any Python branching constructs that can't actually execute all
-# their branches.
-DEFAULT_PARTIAL_ALWAYS = [
-    'while (True|1|False|0):',
-    'if (True|1|False|0):',
-]
-
-
-class CoverageConfig(object):
-    """Coverage.py configuration.
-
-    The attributes of this class are the various settings that control the
-    operation of coverage.py.
-
-    """
-    def __init__(self):
-        """Initialize the configuration attributes to their defaults."""
-        # Metadata about the config.
-        self.attempted_config_files = []
-        self.config_files = []
-
-        # Defaults for [run]
-        self.branch = False
-        self.concurrency = None
-        self.cover_pylib = False
-        self.data_file = ".coverage"
-        self.debug = []
-        self.note = None
-        self.parallel = False
-        self.plugins = []
-        self.source = None
-        self.timid = False
-
-        # Defaults for [report]
-        self.exclude_list = DEFAULT_EXCLUDE[:]
-        self.fail_under = 0
-        self.ignore_errors = False
-        self.include = None
-        self.omit = None
-        self.partial_always_list = DEFAULT_PARTIAL_ALWAYS[:]
-        self.partial_list = DEFAULT_PARTIAL[:]
-        self.precision = 0
-        self.show_missing = False
-        self.skip_covered = False
-
-        # Defaults for [html]
-        self.extra_css = None
-        self.html_dir = "htmlcov"
-        self.html_title = "Coverage report"
-
-        # Defaults for [xml]
-        self.xml_output = "coverage.xml"
-        self.xml_package_depth = 99
-
-        # Defaults for [paths]
-        self.paths = {}
-
-        # Options for plugins
-        self.plugin_options = {}
-
-    MUST_BE_LIST = ["omit", "include", "debug", "plugins"]
-
-    def from_args(self, **kwargs):
-        """Read config values from `kwargs`."""
-        for k, v in iitems(kwargs):
-            if v is not None:
-                if k in self.MUST_BE_LIST and isinstance(v, string_class):
-                    v = [v]
-                setattr(self, k, v)
-
-    def from_file(self, filename, section_prefix=""):
-        """Read configuration from a .rc file.
-
-        `filename` is a file name to read.
-
-        Returns True or False, whether the file could be read.
-
-        """
-        self.attempted_config_files.append(filename)
-
-        cp = HandyConfigParser(section_prefix)
-        try:
-            files_read = cp.read(filename)
-        except configparser.Error as err:
-            raise CoverageException("Couldn't read config file %s: %s" % (filename, err))
-        if not files_read:
-            return False
-
-        self.config_files.extend(files_read)
-
-        try:
-            for option_spec in self.CONFIG_FILE_OPTIONS:
-                self._set_attr_from_config_option(cp, *option_spec)
-        except ValueError as err:
-            raise CoverageException("Couldn't read config file %s: %s" % (filename, err))
-
-        # Check that there are no unrecognized options.
-        all_options = collections.defaultdict(set)
-        for option_spec in self.CONFIG_FILE_OPTIONS:
-            section, option = option_spec[1].split(":")
-            all_options[section].add(option)
-
-        for section, options in iitems(all_options):
-            if cp.has_section(section):
-                for unknown in set(cp.options(section)) - options:
-                    if section_prefix:
-                        section = section_prefix + section
-                    raise CoverageException(
-                        "Unrecognized option '[%s] %s=' in config file %s" % (
-                            section, unknown, filename
-                        )
-                    )
-
-        # [paths] is special
-        if cp.has_section('paths'):
-            for option in cp.options('paths'):
-                self.paths[option] = cp.getlist('paths', option)
-
-        # plugins can have options
-        for plugin in self.plugins:
-            if cp.has_section(plugin):
-                self.plugin_options[plugin] = cp.get_section(plugin)
-
-        return True
-
-    CONFIG_FILE_OPTIONS = [
-        # These are *args for _set_attr_from_config_option:
-        #   (attr, where, type_="")
-        #
-        #   attr is the attribute to set on the CoverageConfig object.
-        #   where is the section:name to read from the configuration file.
-        #   type_ is the optional type to apply, by using .getTYPE to read the
-        #       configuration value from the file.
-
-        # [run]
-        ('branch', 'run:branch', 'boolean'),
-        ('concurrency', 'run:concurrency'),
-        ('cover_pylib', 'run:cover_pylib', 'boolean'),
-        ('data_file', 'run:data_file'),
-        ('debug', 'run:debug', 'list'),
-        ('include', 'run:include', 'list'),
-        ('note', 'run:note'),
-        ('omit', 'run:omit', 'list'),
-        ('parallel', 'run:parallel', 'boolean'),
-        ('plugins', 'run:plugins', 'list'),
-        ('source', 'run:source', 'list'),
-        ('timid', 'run:timid', 'boolean'),
-
-        # [report]
-        ('exclude_list', 'report:exclude_lines', 'regexlist'),
-        ('fail_under', 'report:fail_under', 'int'),
-        ('ignore_errors', 'report:ignore_errors', 'boolean'),
-        ('include', 'report:include', 'list'),
-        ('omit', 'report:omit', 'list'),
-        ('partial_always_list', 'report:partial_branches_always', 'regexlist'),
-        ('partial_list', 'report:partial_branches', 'regexlist'),
-        ('precision', 'report:precision', 'int'),
-        ('show_missing', 'report:show_missing', 'boolean'),
-        ('skip_covered', 'report:skip_covered', 'boolean'),
-
-        # [html]
-        ('extra_css', 'html:extra_css'),
-        ('html_dir', 'html:directory'),
-        ('html_title', 'html:title'),
-
-        # [xml]
-        ('xml_output', 'xml:output'),
-        ('xml_package_depth', 'xml:package_depth', 'int'),
-    ]
-
-    def _set_attr_from_config_option(self, cp, attr, where, type_=''):
-        """Set an attribute on self if it exists in the ConfigParser."""
-        section, option = where.split(":")
-        if cp.has_option(section, option):
-            method = getattr(cp, 'get' + type_)
-            setattr(self, attr, method(section, option))
-
-    def get_plugin_options(self, plugin):
-        """Get a dictionary of options for the plugin named `plugin`."""
-        return self.plugin_options.get(plugin, {})
-
-    def set_option(self, option_name, value):
-        """Set an option in the configuration.
-
-        `option_name` is a colon-separated string indicating the section and
-        option name.  For example, the ``branch`` option in the ``[run]``
-        section of the config file would be indicated with `"run:branch"`.
-
-        `value` is the new value for the option.
-
-        """
-
-        # Check all the hard-coded options.
-        for option_spec in self.CONFIG_FILE_OPTIONS:
-            attr, where = option_spec[:2]
-            if where == option_name:
-                setattr(self, attr, value)
-                return
-
-        # See if it's a plugin option.
-        plugin_name, _, key = option_name.partition(":")
-        if key and plugin_name in self.plugins:
-            self.plugin_options.setdefault(plugin_name, {})[key] = value
-            return
-
-        # If we get here, we didn't find the option.
-        raise CoverageException("No such option: %r" % option_name)
-
-    def get_option(self, option_name):
-        """Get an option from the configuration.
-
-        `option_name` is a colon-separated string indicating the section and
-        option name.  For example, the ``branch`` option in the ``[run]``
-        section of the config file would be indicated with `"run:branch"`.
-
-        Returns the value of the option.
-
-        """
-
-        # Check all the hard-coded options.
-        for option_spec in self.CONFIG_FILE_OPTIONS:
-            attr, where = option_spec[:2]
-            if where == option_name:
-                return getattr(self, attr)
-
-        # See if it's a plugin option.
-        plugin_name, _, key = option_name.partition(":")
-        if key and plugin_name in self.plugins:
-            return self.plugin_options.get(plugin_name, {}).get(key)
-
-        # If we get here, we didn't find the option.
-        raise CoverageException("No such option: %r" % option_name)
-
-#
-# eflag: FileType = Python2
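
A hedged sketch of how the colon-separated option names above are used with `set_option`, `get_option`, and plugin options (the option values and the plugin name `my_plugin` are made up):

    from coverage.config import CoverageConfig

    config = CoverageConfig()

    # "section:option" names map onto CONFIG_FILE_OPTIONS entries.
    config.set_option("run:branch", True)        # [run] branch = True
    config.set_option("report:fail_under", 90)   # [report] fail_under = 90
    assert config.get_option("run:branch") is True

    # For a plugin listed in [run] plugins, the plugin name is the section.
    config.plugins.append("my_plugin")           # hypothetical plugin name
    config.set_option("my_plugin:template_dir", "templates/")
    assert config.get_plugin_options("my_plugin") == {"template_dir": "templates/"}
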
--- a/DebugClients/Python/coverage/control.py	Fri Sep 02 19:08:02 2016 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1202 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
-
-"""Core control stuff for coverage.py."""
-
-import atexit
-import inspect
-import os
-import platform
-import re
-import sys
-import traceback
-
-from coverage import env, files
-from coverage.annotate import AnnotateReporter
-from coverage.backward import string_class, iitems
-from coverage.collector import Collector
-from coverage.config import CoverageConfig
-from coverage.data import CoverageData, CoverageDataFiles
-from coverage.debug import DebugControl
-from coverage.files import TreeMatcher, FnmatchMatcher
-from coverage.files import PathAliases, find_python_files, prep_patterns
-from coverage.files import ModuleMatcher, abs_file
-from coverage.html import HtmlReporter
-from coverage.misc import CoverageException, bool_or_none, join_regex
-from coverage.misc import file_be_gone, isolate_module
-from coverage.monkey import patch_multiprocessing
-from coverage.plugin import FileReporter
-from coverage.plugin_support import Plugins
-from coverage.python import PythonFileReporter
-from coverage.results import Analysis, Numbers
-from coverage.summary import SummaryReporter
-from coverage.xmlreport import XmlReporter
-
-os = isolate_module(os)
-
-# Pypy has some unusual stuff in the "stdlib".  Consider those locations
-# when deciding where the stdlib is.
-try:
-    import _structseq
-except ImportError:
-    _structseq = None
-
-
-class Coverage(object):
-    """Programmatic access to coverage.py.
-
-    To use::
-
-        from coverage import Coverage
-
-        cov = Coverage()
-        cov.start()
-        #.. call your code ..
-        cov.stop()
-        cov.html_report(directory='covhtml')
-
-    """
-    def __init__(
-        self, data_file=None, data_suffix=None, cover_pylib=None,
-        auto_data=False, timid=None, branch=None, config_file=True,
-        source=None, omit=None, include=None, debug=None,
-        concurrency=None,
-    ):
-        """
-        `data_file` is the base name of the data file to use, defaulting to
-        ".coverage".  `data_suffix` is appended (with a dot) to `data_file` to
-        create the final file name.  If `data_suffix` is simply True, then a
-        suffix is created with the machine and process identity included.
-
-        `cover_pylib` is a boolean determining whether Python code installed
-        with the Python interpreter is measured.  This includes the Python
-        standard library and any packages installed with the interpreter.
-
-        If `auto_data` is true, then any existing data file will be read when
-        coverage measurement starts, and data will be saved automatically when
-        measurement stops.
-
-        If `timid` is true, then a slower and simpler trace function will be
-        used.  This is important for some environments where manipulation of
-        tracing functions breaks the faster trace function.
-
-        If `branch` is true, then branch coverage will be measured in addition
-        to the usual statement coverage.
-
-        `config_file` determines what configuration file to read:
-
-            * If it is ".coveragerc", it is interpreted as if it were True,
-              for backward compatibility.
-
-            * If it is a string, it is the name of the file to read.  If the
-              file can't be read, it is an error.
-
-            * If it is True, then a few standard file names are tried
-              (".coveragerc", "setup.cfg").  It is not an error for these files
-              to not be found.
-
-            * If it is False, then no configuration file is read.
-
-        `source` is a list of file paths or package names.  Only code located
-        in the trees indicated by the file paths or package names will be
-        measured.
-
-        `include` and `omit` are lists of file name patterns. Files that match
-        `include` will be measured, files that match `omit` will not.  Each
-        will also accept a single string argument.
-
-        `debug` is a list of strings indicating what debugging information is
-        desired.
-
-        `concurrency` is a string indicating the concurrency library being used
-        in the measured code.  Without this, coverage.py will get incorrect
-        results.  Valid strings are "greenlet", "eventlet", "gevent",
-        "multiprocessing", or "thread" (the default).
-
-        .. versionadded:: 4.0
-            The `concurrency` parameter.
-
-        """
-        # Build our configuration from a number of sources:
-        # 1: defaults:
-        self.config = CoverageConfig()
-
-        # 2: from the rcfile, .coveragerc or setup.cfg file:
-        if config_file:
-            did_read_rc = False
-            # Some API users were specifying ".coveragerc" to mean the same as
-            # True, so make it so.
-            if config_file == ".coveragerc":
-                config_file = True
-            specified_file = (config_file is not True)
-            if not specified_file:
-                config_file = ".coveragerc"
-
-            did_read_rc = self.config.from_file(config_file)
-
-            if not did_read_rc:
-                if specified_file:
-                    raise CoverageException(
-                        "Couldn't read '%s' as a config file" % config_file
-                        )
-                self.config.from_file("setup.cfg", section_prefix="coverage:")
-
-        # 3: from environment variables:
-        env_data_file = os.environ.get('COVERAGE_FILE')
-        if env_data_file:
-            self.config.data_file = env_data_file
-        debugs = os.environ.get('COVERAGE_DEBUG')
-        if debugs:
-            self.config.debug.extend(debugs.split(","))
-
-        # 4: from constructor arguments:
-        self.config.from_args(
-            data_file=data_file, cover_pylib=cover_pylib, timid=timid,
-            branch=branch, parallel=bool_or_none(data_suffix),
-            source=source, omit=omit, include=include, debug=debug,
-            concurrency=concurrency,
-            )
-
-        self._debug_file = None
-        self._auto_data = auto_data
-        self._data_suffix = data_suffix
-
-        # The matchers for _should_trace.
-        self.source_match = None
-        self.source_pkgs_match = None
-        self.pylib_match = self.cover_match = None
-        self.include_match = self.omit_match = None
-
-        # Is it ok for no data to be collected?
-        self._warn_no_data = True
-        self._warn_unimported_source = True
-
-        # A record of all the warnings that have been issued.
-        self._warnings = []
-
-        # Other instance attributes, set later.
-        self.omit = self.include = self.source = None
-        self.source_pkgs = None
-        self.data = self.data_files = self.collector = None
-        self.plugins = None
-        self.pylib_dirs = self.cover_dirs = None
-        self.data_suffix = self.run_suffix = None
-        self._exclude_re = None
-        self.debug = None
-
-        # State machine variables:
-        # Have we initialized everything?
-        self._inited = False
-        # Have we started collecting and not stopped it?
-        self._started = False
-        # Have we measured some data and not harvested it?
-        self._measured = False
-
-    def _init(self):
-        """Set all the initial state.
-
-        This is called by the public methods to initialize state. This lets us
-        construct a :class:`Coverage` object, then tweak its state before this
-        function is called.
-
-        """
-        if self._inited:
-            return
-
-        # Create and configure the debugging controller. COVERAGE_DEBUG_FILE
-        # is an environment variable, the name of a file to append debug logs
-        # to.
-        if self._debug_file is None:
-            debug_file_name = os.environ.get("COVERAGE_DEBUG_FILE")
-            if debug_file_name:
-                self._debug_file = open(debug_file_name, "a")
-            else:
-                self._debug_file = sys.stderr
-        self.debug = DebugControl(self.config.debug, self._debug_file)
-
-        # Load plugins
-        self.plugins = Plugins.load_plugins(self.config.plugins, self.config, self.debug)
-
-        # _exclude_re is a dict that maps exclusion list names to compiled
-        # regexes.
-        self._exclude_re = {}
-        self._exclude_regex_stale()
-
-        files.set_relative_directory()
-
-        # The source argument can be directories or package names.
-        self.source = []
-        self.source_pkgs = []
-        for src in self.config.source or []:
-            if os.path.exists(src):
-                self.source.append(files.canonical_filename(src))
-            else:
-                self.source_pkgs.append(src)
-
-        self.omit = prep_patterns(self.config.omit)
-        self.include = prep_patterns(self.config.include)
-
-        concurrency = self.config.concurrency
-        if concurrency == "multiprocessing":
-            patch_multiprocessing()
-            concurrency = None
-
-        self.collector = Collector(
-            should_trace=self._should_trace,
-            check_include=self._check_include_omit_etc,
-            timid=self.config.timid,
-            branch=self.config.branch,
-            warn=self._warn,
-            concurrency=concurrency,
-            )
-
-        # Early warning if we aren't going to be able to support plugins.
-        if self.plugins.file_tracers and not self.collector.supports_plugins:
-            self._warn(
-                "Plugin file tracers (%s) aren't supported with %s" % (
-                    ", ".join(
-                        plugin._coverage_plugin_name
-                            for plugin in self.plugins.file_tracers
-                        ),
-                    self.collector.tracer_name(),
-                    )
-                )
-            for plugin in self.plugins.file_tracers:
-                plugin._coverage_enabled = False
-
-        # Suffixes are a bit tricky.  We want to use the data suffix only when
-        # collecting data, not when combining data.  So we save it as
-        # `self.run_suffix` now, and promote it to `self.data_suffix` if we
-        # find that we are collecting data later.
-        if self._data_suffix or self.config.parallel:
-            if not isinstance(self._data_suffix, string_class):
-                # if data_suffix=True, use .machinename.pid.random
-                self._data_suffix = True
-        else:
-            self._data_suffix = None
-        self.data_suffix = None
-        self.run_suffix = self._data_suffix
-
-        # Create the data file.  We do this at construction time so that the
-        # data file will be written into the directory where the process
-        # started rather than wherever the process eventually chdir'd to.
-        self.data = CoverageData(debug=self.debug)
-        self.data_files = CoverageDataFiles(basename=self.config.data_file, warn=self._warn)
-
-        # The directories for files considered "installed with the interpreter".
-        self.pylib_dirs = set()
-        if not self.config.cover_pylib:
-            # Look at where some standard modules are located. That's the
-            # indication for "installed with the interpreter". In some
-            # environments (virtualenv, for example), these modules may be
-            # spread across a few locations. Look at all the candidate modules
-            # we've imported, and take all the different ones.
-            for m in (atexit, inspect, os, platform, re, _structseq, traceback):
-                if m is not None and hasattr(m, "__file__"):
-                    self.pylib_dirs.add(self._canonical_dir(m))
-            if _structseq and not hasattr(_structseq, '__file__'):
-                # PyPy 2.4 has no __file__ in the builtin modules, but the code
-                # objects still have the file names.  So dig into one to find
-                # the path to exclude.
-                structseq_new = _structseq.structseq_new
-                try:
-                    structseq_file = structseq_new.func_code.co_filename
-                except AttributeError:
-                    structseq_file = structseq_new.__code__.co_filename
-                self.pylib_dirs.add(self._canonical_dir(structseq_file))
-
-        # To avoid tracing the coverage.py code itself, we skip anything
-        # located where we are.
-        self.cover_dirs = [self._canonical_dir(__file__)]
-        if env.TESTING:
-            # When testing, we use PyContracts, which should be considered
-            # part of coverage.py, and it uses six. Exclude those directories
-            # just as we exclude ourselves.
-            import contracts, six
-            for mod in [contracts, six]:
-                self.cover_dirs.append(self._canonical_dir(mod))
-
-        # Set the reporting precision.
-        Numbers.set_precision(self.config.precision)
-
-        atexit.register(self._atexit)
-
-        self._inited = True
-
-        # Create the matchers we need for _should_trace
-        if self.source or self.source_pkgs:
-            self.source_match = TreeMatcher(self.source)
-            self.source_pkgs_match = ModuleMatcher(self.source_pkgs)
-        else:
-            if self.cover_dirs:
-                self.cover_match = TreeMatcher(self.cover_dirs)
-            if self.pylib_dirs:
-                self.pylib_match = TreeMatcher(self.pylib_dirs)
-        if self.include:
-            self.include_match = FnmatchMatcher(self.include)
-        if self.omit:
-            self.omit_match = FnmatchMatcher(self.omit)
-
-        # The user may want to debug things, show info if desired.
-        wrote_any = False
-        if self.debug.should('config'):
-            config_info = sorted(self.config.__dict__.items())
-            self.debug.write_formatted_info("config", config_info)
-            wrote_any = True
-
-        if self.debug.should('sys'):
-            self.debug.write_formatted_info("sys", self.sys_info())
-            for plugin in self.plugins:
-                header = "sys: " + plugin._coverage_plugin_name
-                info = plugin.sys_info()
-                self.debug.write_formatted_info(header, info)
-            wrote_any = True
-
-        if wrote_any:
-            self.debug.write_formatted_info("end", ())
-
-    def _canonical_dir(self, morf):
-        """Return the canonical directory of the module or file `morf`."""
-        morf_filename = PythonFileReporter(morf, self).filename
-        return os.path.split(morf_filename)[0]
-
-    def _source_for_file(self, filename):
-        """Return the source file for `filename`.
-
-        Given a file name being traced, return the best guess as to the source
-        file to attribute it to.
-
-        """
-        if filename.endswith(".py"):
-            # .py files are themselves source files.
-            return filename
-
-        elif filename.endswith((".pyc", ".pyo")):
-            # Bytecode files probably have source files near them.
-            py_filename = filename[:-1]
-            if os.path.exists(py_filename):
-                # Found a .py file, use that.
-                return py_filename
-            if env.WINDOWS:
-                # On Windows, it could be a .pyw file.
-                pyw_filename = py_filename + "w"
-                if os.path.exists(pyw_filename):
-                    return pyw_filename
-            # Didn't find source, but it's probably the .py file we want.
-            return py_filename
-
-        elif filename.endswith("$py.class"):
-            # Jython is easy to guess.
-            return filename[:-9] + ".py"
-
-        # No idea, just use the file name as-is.
-        return filename
-
-    def _name_for_module(self, module_globals, filename):
-        """Get the name of the module for a set of globals and file name.
-
-        For configurability's sake, we allow __main__ modules to be matched by
-        their importable name.
-
-        If loaded via runpy (aka -m), we can usually recover the "original"
-        full dotted module name, otherwise, we resort to interpreting the
-        file name to get the module's name.  In the case that the module name
-        can't be determined, None is returned.
-
-        """
-        dunder_name = module_globals.get('__name__', None)
-
-        if isinstance(dunder_name, str) and dunder_name != '__main__':
-            # This is the usual case: an imported module.
-            return dunder_name
-
-        loader = module_globals.get('__loader__', None)
-        for attrname in ('fullname', 'name'):   # attribute renamed in py3.2
-            if hasattr(loader, attrname):
-                fullname = getattr(loader, attrname)
-            else:
-                continue
-
-            if isinstance(fullname, str) and fullname != '__main__':
-                # Module loaded via: runpy -m
-                return fullname
-
-        # Script as first argument to Python command line.
-        inspectedname = inspect.getmodulename(filename)
-        if inspectedname is not None:
-            return inspectedname
-        else:
-            return dunder_name
-
-    def _should_trace_internal(self, filename, frame):
-        """Decide whether to trace execution in `filename`, with a reason.
-
-        This function is called from the trace function.  As each new file name
-        is encountered, this function determines whether it is traced or not.
-
-        Returns a FileDisposition object.
-
-        """
-        original_filename = filename
-        disp = _disposition_init(self.collector.file_disposition_class, filename)
-
-        def nope(disp, reason):
-            """Simple helper to make it easy to return NO."""
-            disp.trace = False
-            disp.reason = reason
-            return disp
-
-        # Compiled Python files have two file names: frame.f_code.co_filename is
-        # the file name at the time the .pyc was compiled.  The second name is
-        # __file__, which is where the .pyc was actually loaded from.  Since
-        # .pyc files can be moved after compilation (for example, by being
-        # installed), we look for __file__ in the frame and prefer it to the
-        # co_filename value.
-        dunder_file = frame.f_globals.get('__file__')
-        if dunder_file:
-            filename = self._source_for_file(dunder_file)
-            if original_filename and not original_filename.startswith('<'):
-                orig = os.path.basename(original_filename)
-                if orig != os.path.basename(filename):
-                    # Files shouldn't be renamed when moved. This happens when
-                    # exec'ing code.  If it seems like something is wrong with
-                    # the frame's file name, then just use the original.
-                    filename = original_filename
-
-        if not filename:
-            # Empty string is pretty useless.
-            return nope(disp, "empty string isn't a file name")
-
-        if filename.startswith('memory:'):
-            return nope(disp, "memory isn't traceable")
-
-        if filename.startswith('<'):
-            # Lots of non-file execution is represented with artificial
-            # file names like "<string>", "<doctest readme.txt[0]>", or
-            # "<exec_function>".  Don't ever trace these executions, since we
-            # can't do anything with the data later anyway.
-            return nope(disp, "not a real file name")
-
-        # pyexpat does a dumb thing, calling the trace function explicitly from
-        # C code with a C file name.
-        if re.search(r"[/\\]Modules[/\\]pyexpat.c", filename):
-            return nope(disp, "pyexpat lies about itself")
-
-        # Jython reports the .class file to the tracer, use the source file.
-        if filename.endswith("$py.class"):
-            filename = filename[:-9] + ".py"
-
-        canonical = files.canonical_filename(filename)
-        disp.canonical_filename = canonical
-
-        # Try the plugins, see if they have an opinion about the file.
-        plugin = None
-        for plugin in self.plugins.file_tracers:
-            if not plugin._coverage_enabled:
-                continue
-
-            try:
-                file_tracer = plugin.file_tracer(canonical)
-                if file_tracer is not None:
-                    file_tracer._coverage_plugin = plugin
-                    disp.trace = True
-                    disp.file_tracer = file_tracer
-                    if file_tracer.has_dynamic_source_filename():
-                        disp.has_dynamic_filename = True
-                    else:
-                        disp.source_filename = files.canonical_filename(
-                            file_tracer.source_filename()
-                        )
-                    break
-            except Exception:
-                self._warn(
-                    "Disabling plugin %r due to an exception:" % (
-                        plugin._coverage_plugin_name
-                    )
-                )
-                traceback.print_exc()
-                plugin._coverage_enabled = False
-                continue
-        else:
-            # No plugin wanted it: it's Python.
-            disp.trace = True
-            disp.source_filename = canonical
-
-        if not disp.has_dynamic_filename:
-            if not disp.source_filename:
-                raise CoverageException(
-                    "Plugin %r didn't set source_filename for %r" %
-                    (plugin, disp.original_filename)
-                )
-            reason = self._check_include_omit_etc_internal(
-                disp.source_filename, frame,
-            )
-            if reason:
-                nope(disp, reason)
-
-        return disp
-
-    def _check_include_omit_etc_internal(self, filename, frame):
-        """Check a file name against the include, omit, etc, rules.
-
-        Returns a string or None.  A string means "don't trace" and gives the
-        reason why.  None means no reason was found to skip tracing.
-
-        """
-        modulename = self._name_for_module(frame.f_globals, filename)
-
-        # If the user specified source or include, then that's authoritative
-        # about the outer bound of what to measure and we don't have to apply
-        # any canned exclusions. If they didn't, then we have to exclude the
-        # stdlib and coverage.py directories.
-        if self.source_match:
-            if self.source_pkgs_match.match(modulename):
-                if modulename in self.source_pkgs:
-                    self.source_pkgs.remove(modulename)
-                return None  # There's no reason to skip this file.
-
-            if not self.source_match.match(filename):
-                return "falls outside the --source trees"
-        elif self.include_match:
-            if not self.include_match.match(filename):
-                return "falls outside the --include trees"
-        else:
-            # If we aren't supposed to trace installed code, then check if this
-            # is near the Python standard library and skip it if so.
-            if self.pylib_match and self.pylib_match.match(filename):
-                return "is in the stdlib"
-
-            # We exclude the coverage.py code itself, since a little of it
-            # will be measured otherwise.
-            if self.cover_match and self.cover_match.match(filename):
-                return "is part of coverage.py"
-
-        # Check the file against the omit pattern.
-        if self.omit_match and self.omit_match.match(filename):
-            return "is inside an --omit pattern"
-
-        # No reason found to skip this file.
-        return None
-
-    def _should_trace(self, filename, frame):
-        """Decide whether to trace execution in `filename`.
-
-        Calls `_should_trace_internal`, and returns the FileDisposition.
-
-        """
-        disp = self._should_trace_internal(filename, frame)
-        if self.debug.should('trace'):
-            self.debug.write(_disposition_debug_msg(disp))
-        return disp
-
-    def _check_include_omit_etc(self, filename, frame):
-        """Check a file name against the include/omit/etc, rules, verbosely.
-
-        Returns a boolean: True if the file should be traced, False if not.
-
-        """
-        reason = self._check_include_omit_etc_internal(filename, frame)
-        if self.debug.should('trace'):
-            if not reason:
-                msg = "Including %r" % (filename,)
-            else:
-                msg = "Not including %r: %s" % (filename, reason)
-            self.debug.write(msg)
-
-        return not reason
-
-    def _warn(self, msg):
-        """Use `msg` as a warning."""
-        self._warnings.append(msg)
-        if self.debug.should('pid'):
-            msg = "[%d] %s" % (os.getpid(), msg)
-        sys.stderr.write("Coverage.py warning: %s\n" % msg)
-
-    def get_option(self, option_name):
-        """Get an option from the configuration.
-
-        `option_name` is a colon-separated string indicating the section and
-        option name.  For example, the ``branch`` option in the ``[run]``
-        section of the config file would be indicated with `"run:branch"`.
-
-        Returns the value of the option.
-
-        .. versionadded:: 4.0
-
-        """
-        return self.config.get_option(option_name)
-
-    def set_option(self, option_name, value):
-        """Set an option in the configuration.
-
-        `option_name` is a colon-separated string indicating the section and
-        option name.  For example, the ``branch`` option in the ``[run]``
-        section of the config file would be indicated with ``"run:branch"``.
-
-        `value` is the new value for the option.  This should be a Python
-        value where appropriate.  For example, use True for booleans, not the
-        string ``"True"``.
-
-        As an example, calling::
-
-            cov.set_option("run:branch", True)
-
-        has the same effect as this configuration file::
-
-            [run]
-            branch = True
-
-        .. versionadded:: 4.0
-
-        """
-        self.config.set_option(option_name, value)
-
-    def use_cache(self, usecache):
-        """Obsolete method."""
-        self._init()
-        if not usecache:
-            self._warn("use_cache(False) is no longer supported.")
-
-    def load(self):
-        """Load previously-collected coverage data from the data file."""
-        self._init()
-        self.collector.reset()
-        self.data_files.read(self.data)
-
-    def start(self):
-        """Start measuring code coverage.
-
-        Coverage measurement actually occurs in functions called after
-        :meth:`start` is invoked.  Statements in the same scope as
-        :meth:`start` won't be measured.
-
-        Once you invoke :meth:`start`, you must also call :meth:`stop`
-        eventually, or your process might not shut down cleanly.
-
-        """
-        self._init()
-        if self.run_suffix:
-            # Calling start() means we're running code, so use the run_suffix
-            # as the data_suffix when we eventually save the data.
-            self.data_suffix = self.run_suffix
-        if self._auto_data:
-            self.load()
-
-        self.collector.start()
-        self._started = True
-        self._measured = True
-
-    def stop(self):
-        """Stop measuring code coverage."""
-        if self._started:
-            self.collector.stop()
-        self._started = False
-
-    def _atexit(self):
-        """Clean up on process shutdown."""
-        if self._started:
-            self.stop()
-        if self._auto_data:
-            self.save()
-
-    def erase(self):
-        """Erase previously-collected coverage data.
-
-        This removes the in-memory data collected in this session as well as
-        discarding the data file.
-
-        """
-        self._init()
-        self.collector.reset()
-        self.data.erase()
-        self.data_files.erase(parallel=self.config.parallel)
-
-    def clear_exclude(self, which='exclude'):
-        """Clear the exclude list."""
-        self._init()
-        setattr(self.config, which + "_list", [])
-        self._exclude_regex_stale()
-
-    def exclude(self, regex, which='exclude'):
-        """Exclude source lines from execution consideration.
-
-        A number of lists of regular expressions are maintained.  Each list
-        selects lines that are treated differently during reporting.
-
-        `which` determines which list is modified.  The "exclude" list selects
-        lines that are not considered executable at all.  The "partial" list
-        indicates lines with branches that are not taken.
-
-        `regex` is a regular expression.  The regex is added to the specified
-        list.  If any of the regexes in the list is found in a line, the line
-        is marked for special treatment during reporting.
-
-        """
-        self._init()
-        excl_list = getattr(self.config, which + "_list")
-        excl_list.append(regex)
-        self._exclude_regex_stale()
-
-    def _exclude_regex_stale(self):
-        """Drop all the compiled exclusion regexes, a list was modified."""
-        self._exclude_re.clear()
-
-    def _exclude_regex(self, which):
-        """Return a compiled regex for the given exclusion list."""
-        if which not in self._exclude_re:
-            excl_list = getattr(self.config, which + "_list")
-            self._exclude_re[which] = join_regex(excl_list)
-        return self._exclude_re[which]
-
-    def get_exclude_list(self, which='exclude'):
-        """Return a list of excluded regex patterns.
-
-        `which` indicates which list is desired.  See :meth:`exclude` for the
-        lists that are available, and their meaning.
-
-        """
-        self._init()
-        return getattr(self.config, which + "_list")
-
-    def save(self):
-        """Save the collected coverage data to the data file."""
-        self._init()
-        self.get_data()
-        self.data_files.write(self.data, suffix=self.data_suffix)
-
-    def combine(self, data_paths=None):
-        """Combine together a number of similarly-named coverage data files.
-
-        All coverage data files whose name starts with `data_file` (from the
-        coverage() constructor) will be read, and combined together into the
-        current measurements.
-
-        `data_paths` is a list of files or directories from which data should
-        be combined. If no list is passed, then the data files from the
-        directory indicated by the current data file (probably the current
-        directory) will be combined.
-
-        .. versionadded:: 4.0
-            The `data_paths` parameter.
-
-        """
-        self._init()
-        self.get_data()
-
-        aliases = None
-        if self.config.paths:
-            aliases = PathAliases()
-            for paths in self.config.paths.values():
-                result = paths[0]
-                for pattern in paths[1:]:
-                    aliases.add(pattern, result)
-
-        self.data_files.combine_parallel_data(self.data, aliases=aliases, data_paths=data_paths)
-
-    def get_data(self):
-        """Get the collected data and reset the collector.
-
-        Also warn about various problems collecting data.
-
-        Returns a :class:`coverage.CoverageData`, the collected coverage data.
-
-        .. versionadded:: 4.0
-
-        """
-        self._init()
-        if not self._measured:
-            return self.data
-
-        self.collector.save_data(self.data)
-
-        # If there are still entries in the source_pkgs list, then we never
-        # encountered those packages.
-        if self._warn_unimported_source:
-            for pkg in self.source_pkgs:
-                if pkg not in sys.modules:
-                    self._warn("Module %s was never imported." % pkg)
-                elif not (
-                    hasattr(sys.modules[pkg], '__file__') and
-                    os.path.exists(sys.modules[pkg].__file__)
-                ):
-                    self._warn("Module %s has no Python source." % pkg)
-                else:
-                    self._warn("Module %s was previously imported, but not measured." % pkg)
-
-        # Find out if we got any data.
-        if not self.data and self._warn_no_data:
-            self._warn("No data was collected.")
-
-        # Find files that were never executed at all.
-        for src in self.source:
-            for py_file in find_python_files(src):
-                py_file = files.canonical_filename(py_file)
-
-                if self.omit_match and self.omit_match.match(py_file):
-                    # Turns out this file was omitted, so don't pull it back
-                    # in as unexecuted.
-                    continue
-
-                self.data.touch_file(py_file)
-
-        if self.config.note:
-            self.data.add_run_info(note=self.config.note)
-
-        self._measured = False
-        return self.data
-
-    # Backward compatibility with version 1.
-    def analysis(self, morf):
-        """Like `analysis2` but doesn't return excluded line numbers."""
-        f, s, _, m, mf = self.analysis2(morf)
-        return f, s, m, mf
-
-    def analysis2(self, morf):
-        """Analyze a module.
-
-        `morf` is a module or a file name.  It will be analyzed to determine
-        its coverage statistics.  The return value is a 5-tuple:
-
-        * The file name for the module.
-        * A list of line numbers of executable statements.
-        * A list of line numbers of excluded statements.
-        * A list of line numbers of statements not run (missing from
-          execution).
-        * A readable formatted string of the missing line numbers.
-
-        The analysis uses the source file itself and the current measured
-        coverage data.
-
-        """
-        self._init()
-        analysis = self._analyze(morf)
-        return (
-            analysis.filename,
-            sorted(analysis.statements),
-            sorted(analysis.excluded),
-            sorted(analysis.missing),
-            analysis.missing_formatted(),
-            )
-
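
A hedged sketch of consuming the 5-tuple described above (`proj.mod` and "proj/mod.py" are a hypothetical module and file under measurement):

    from coverage import Coverage

    cov = Coverage()
    cov.start()
    import proj.mod              # hypothetical code under measurement
    cov.stop()

    filename, statements, excluded, missing, formatted = cov.analysis2("proj/mod.py")
    print("%s: %d of %d statements missing (%s)"
          % (filename, len(missing), len(statements), formatted))
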
-    def _analyze(self, it):
-        """Analyze a single morf or code unit.
-
-        Returns an `Analysis` object.
-
-        """
-        self.get_data()
-        if not isinstance(it, FileReporter):
-            it = self._get_file_reporter(it)
-
-        return Analysis(self.data, it)
-
-    def _get_file_reporter(self, morf):
-        """Get a FileReporter for a module or file name."""
-        plugin = None
-        file_reporter = "python"
-
-        if isinstance(morf, string_class):
-            abs_morf = abs_file(morf)
-            plugin_name = self.data.file_tracer(abs_morf)
-            if plugin_name:
-                plugin = self.plugins.get(plugin_name)
-
-        if plugin:
-            file_reporter = plugin.file_reporter(abs_morf)
-            if file_reporter is None:
-                raise CoverageException(
-                    "Plugin %r did not provide a file reporter for %r." % (
-                        plugin._coverage_plugin_name, morf
-                    )
-                )
-
-        if file_reporter == "python":
-            file_reporter = PythonFileReporter(morf, self)
-
-        return file_reporter
-
-    def _get_file_reporters(self, morfs=None):
-        """Get a list of FileReporters for a list of modules or file names.
-
-        For each module or file name in `morfs`, find a FileReporter.  Return
-        the list of FileReporters.
-
-        If `morfs` is a single module or file name, this returns a list of one
-        FileReporter.  If `morfs` is empty or None, then the list of all files
-        measured is used to find the FileReporters.
-
-        """
-        if not morfs:
-            morfs = self.data.measured_files()
-
-        # Be sure we have a list.
-        if not isinstance(morfs, (list, tuple)):
-            morfs = [morfs]
-
-        file_reporters = []
-        for morf in morfs:
-            file_reporter = self._get_file_reporter(morf)
-            file_reporters.append(file_reporter)
-
-        return file_reporters
-
-    def report(
-        self, morfs=None, show_missing=None, ignore_errors=None,
-        file=None,                  # pylint: disable=redefined-builtin
-        omit=None, include=None, skip_covered=None,
-    ):
-        """Write a summary report to `file`.
-
-        Each module in `morfs` is listed, with counts of statements, executed
-        statements, missing statements, and a list of lines missed.
-
-        `include` is a list of file name patterns.  Files that match will be
-        included in the report. Files matching `omit` will not be included in
-        the report.
-
-        Returns a float, the total percentage covered.
-
-        """
-        self.get_data()
-        self.config.from_args(
-            ignore_errors=ignore_errors, omit=omit, include=include,
-            show_missing=show_missing, skip_covered=skip_covered,
-            )
-        reporter = SummaryReporter(self, self.config)
-        return reporter.report(morfs, outfile=file)
-
-    def annotate(
-        self, morfs=None, directory=None, ignore_errors=None,
-        omit=None, include=None,
-    ):
-        """Annotate a list of modules.
-
-        Each module in `morfs` is annotated.  The source is written to a new
-        file, named with a ",cover" suffix, with each line prefixed with a
-        marker to indicate the coverage of the line.  Covered lines have ">",
-        excluded lines have "-", and missing lines have "!".
-
-        See :meth:`report` for other arguments.
-
-        """
-        self.get_data()
-        self.config.from_args(
-            ignore_errors=ignore_errors, omit=omit, include=include
-            )
-        reporter = AnnotateReporter(self, self.config)
-        reporter.report(morfs, directory=directory)
-
-    def html_report(self, morfs=None, directory=None, ignore_errors=None,
-                    omit=None, include=None, extra_css=None, title=None):
-        """Generate an HTML report.
-
-        The HTML is written to `directory`.  The file "index.html" is the
-        overview starting point, with links to more detailed pages for
-        individual modules.
-
-        `extra_css` is a path to a file of other CSS to apply on the page.
-        It will be copied into the HTML directory.
-
-        `title` is a text string (not HTML) to use as the title of the HTML
-        report.
-
-        See :meth:`report` for other arguments.
-
-        Returns a float, the total percentage covered.
-
-        """
-        self.get_data()
-        self.config.from_args(
-            ignore_errors=ignore_errors, omit=omit, include=include,
-            html_dir=directory, extra_css=extra_css, html_title=title,
-            )
-        reporter = HtmlReporter(self, self.config)
-        return reporter.report(morfs)
-
-    def xml_report(
-        self, morfs=None, outfile=None, ignore_errors=None,
-        omit=None, include=None,
-    ):
-        """Generate an XML report of coverage results.
-
-        The report is compatible with Cobertura reports.
-
-        Each module in `morfs` is included in the report.  `outfile` is the
-        path to write the file to, "-" will write to stdout.
-
-        See :meth:`report` for other arguments.
-
-        Returns a float, the total percentage covered.
-
-        """
-        self.get_data()
-        self.config.from_args(
-            ignore_errors=ignore_errors, omit=omit, include=include,
-            xml_output=outfile,
-            )
-        file_to_close = None
-        delete_file = False
-        if self.config.xml_output:
-            if self.config.xml_output == '-':
-                outfile = sys.stdout
-            else:
-                # Ensure that the output directory is created; done here
-                # because this report pre-opens the output file.
-                # HTMLReport does this using the Report plumbing because
-                # its task is more complex, being multiple files.
-                output_dir = os.path.dirname(self.config.xml_output)
-                if output_dir and not os.path.isdir(output_dir):
-                    os.makedirs(output_dir)
-                open_kwargs = {}
-                if env.PY3:
-                    open_kwargs['encoding'] = 'utf8'
-                outfile = open(self.config.xml_output, "w", **open_kwargs)
-                file_to_close = outfile
-        try:
-            reporter = XmlReporter(self, self.config)
-            return reporter.report(morfs, outfile=outfile)
-        except CoverageException:
-            delete_file = True
-            raise
-        finally:
-            if file_to_close:
-                file_to_close.close()
-                if delete_file:
-                    file_be_gone(self.config.xml_output)
-
-    def sys_info(self):
-        """Return a list of (key, value) pairs showing internal information."""
-
-        import coverage as covmod
-
-        self._init()
-
-        ft_plugins = []
-        for ft in self.plugins.file_tracers:
-            ft_name = ft._coverage_plugin_name
-            if not ft._coverage_enabled:
-                ft_name += " (disabled)"
-            ft_plugins.append(ft_name)
-
-        info = [
-            ('version', covmod.__version__),
-            ('coverage', covmod.__file__),
-            ('cover_dirs', self.cover_dirs),
-            ('pylib_dirs', self.pylib_dirs),
-            ('tracer', self.collector.tracer_name()),
-            ('plugins.file_tracers', ft_plugins),
-            ('config_files', self.config.attempted_config_files),
-            ('configs_read', self.config.config_files),
-            ('data_path', self.data_files.filename),
-            ('python', sys.version.replace('\n', '')),
-            ('platform', platform.platform()),
-            ('implementation', platform.python_implementation()),
-            ('executable', sys.executable),
-            ('cwd', os.getcwd()),
-            ('path', sys.path),
-            ('environment', sorted(
-                ("%s = %s" % (k, v))
-                for k, v in iitems(os.environ)
-                if k.startswith(("COV", "PY"))
-            )),
-            ('command_line', " ".join(getattr(sys, 'argv', ['???']))),
-            ]
-
-        matcher_names = [
-            'source_match', 'source_pkgs_match',
-            'include_match', 'omit_match',
-            'cover_match', 'pylib_match',
-            ]
-
-        for matcher_name in matcher_names:
-            matcher = getattr(self, matcher_name)
-            if matcher:
-                matcher_info = matcher.info()
-            else:
-                matcher_info = '-none-'
-            info.append((matcher_name, matcher_info))
-
-        return info
-
-
-# FileDisposition "methods": FileDisposition is a pure value object, so it can
-# be implemented in either C or Python.  Acting on them is done with these
-# functions.
-
-def _disposition_init(cls, original_filename):
-    """Construct and initialize a new FileDisposition object."""
-    disp = cls()
-    disp.original_filename = original_filename
-    disp.canonical_filename = original_filename
-    disp.source_filename = None
-    disp.trace = False
-    disp.reason = ""
-    disp.file_tracer = None
-    disp.has_dynamic_filename = False
-    return disp
-
-
-def _disposition_debug_msg(disp):
-    """Make a nice debug message of what the FileDisposition is doing."""
-    if disp.trace:
-        msg = "Tracing %r" % (disp.original_filename,)
-        if disp.file_tracer:
-            msg += ": will be traced by %r" % disp.file_tracer
-    else:
-        msg = "Not tracing %r: %s" % (disp.original_filename, disp.reason)
-    return msg
-
-
-def process_startup():
-    """Call this at Python start-up to perhaps measure coverage.
-
-    If the environment variable COVERAGE_PROCESS_START is defined, coverage
-    measurement is started.  The value of the variable is the config file
-    to use.
-
-    There are two ways to configure your Python installation to invoke this
-    function when Python starts:
-
-    #. Create or append to sitecustomize.py to add these lines::
-
-        import coverage
-        coverage.process_startup()
-
-    #. Create a .pth file in your Python installation containing::
-
-        import coverage; coverage.process_startup()
-
-    Returns the :class:`Coverage` instance that was started, or None if it was
-    not started by this call.
-
-    """
-    cps = os.environ.get("COVERAGE_PROCESS_START")
-    if not cps:
-        # No request for coverage, nothing to do.
-        return None
-
-    # This function can be called more than once in a process. This happens
-    # because some virtualenv configurations make the same directory visible
-    # twice in sys.path.  This means that the .pth file will be found twice,
-    # and executed twice, executing this function twice.  We set a global
-    # flag (an attribute on this function) to indicate that coverage.py has
-    # already been started, so we can avoid doing it twice.
-    #
-    # https://bitbucket.org/ned/coveragepy/issue/340/keyerror-subpy has more
-    # details.
-
-    if hasattr(process_startup, "done"):
-        # We've annotated this function before, so we must have already
-        # started coverage.py in this process.  Nothing to do.
-        return None
-
-    process_startup.done = True
-    cov = Coverage(config_file=cps, auto_data=True)
-    cov.start()
-    cov._warn_no_data = False
-    cov._warn_unimported_source = False
-
-    return cov
-
-#
-# eflag: FileType = Python2
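
A hedged sketch of combining parallel data files with path aliasing, based on `combine()` above and the `[paths]` handling in config.py (the directory names are made up, and `.coverage.*` data files are assumed to already exist in the current directory):

    # .coveragerc (illustrative):
    #
    #   [run]
    #   parallel = True
    #
    #   [paths]
    #   source =
    #       src/pkg
    #       /ci/build/src/pkg
    #
    from coverage import Coverage

    cov = Coverage()     # reads .coveragerc, including [paths]
    cov.combine()        # merges .coverage.* files; /ci/build/src/pkg/... is
                         # remapped onto src/pkg/... via PathAliases
    cov.save()
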
--- a/DebugClients/Python/coverage/data.py	Fri Sep 02 19:08:02 2016 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,771 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
-
-"""Coverage data for coverage.py."""
-
-import glob
-import itertools
-import json
-import optparse
-import os
-import os.path
-import random
-import re
-import socket
-
-from coverage import env
-from coverage.backward import iitems, string_class
-from coverage.debug import _TEST_NAME_FILE
-from coverage.files import PathAliases
-from coverage.misc import CoverageException, file_be_gone, isolate_module
-
-os = isolate_module(os)
-
-
-class CoverageData(object):
-    """Manages collected coverage data, including file storage.
-
-    This class is the public supported API to the data coverage.py collects
-    during program execution.  It includes information about what code was
-    executed. It does not include information from the analysis phase, to
-    determine what lines could have been executed, or what lines were not
-    executed.
-
-    .. note::
-
-        The file format is not documented or guaranteed.  It will change in
-        the future, in possibly complicated ways.  Do not read coverage.py
-        data files directly.  Use this API to avoid disruption.
-
-    There are a number of kinds of data that can be collected:
-
-    * **lines**: the line numbers of source lines that were executed.
-      These are always available.
-
-    * **arcs**: pairs of source and destination line numbers for transitions
-      between source lines.  These are only available if branch coverage was
-      used.
-
-    * **file tracer names**: the module names of the file tracer plugins that
-      handled each file in the data.
-
-    * **run information**: information about the program execution.  This is
-      written during "coverage run", and then accumulated during "coverage
-      combine".
-
-    Lines, arcs, and file tracer names are stored for each source file. File
-    names in this API are case-sensitive, even on platforms with
-    case-insensitive file systems.
-
-    To read a coverage.py data file, use :meth:`read_file`, or
-    :meth:`read_fileobj` if you have an already-opened file.  You can then
-    access the line, arc, or file tracer data with :meth:`lines`, :meth:`arcs`,
-    or :meth:`file_tracer`.  Run information is available with
-    :meth:`run_infos`.
-
-    The :meth:`has_arcs` method indicates whether arc data is available.  You
-    can get a list of the files in the data with :meth:`measured_files`.
-    A summary of the line data is available from :meth:`line_counts`.  As with
-    most Python containers, you can determine if there is any data at all by
-    using this object as a boolean value.
-
-
-    Most data files will be created by coverage.py itself, but you can use
-    methods here to create data files if you like.  The :meth:`add_lines`,
-    :meth:`add_arcs`, and :meth:`add_file_tracers` methods add data, in ways
-    that are convenient for coverage.py.  The :meth:`add_run_info` method adds
-    key-value pairs to the run information.
-
-    To add a file without any measured data, use :meth:`touch_file`.
-
-    You write to a named file with :meth:`write_file`, or to an already opened
-    file with :meth:`write_fileobj`.
-
-    You can clear the data in memory with :meth:`erase`.  Two data collections
-    can be combined by using :meth:`update` on one :class:`CoverageData`,
-    passing it the other.
-
-    """
-
-    # The data file format is JSON, with these keys:
-    #
-    #     * lines: a dict mapping file names to lists of line numbers
-    #       executed::
-    #
-    #         { "file1": [17,23,45], "file2": [1,2,3], ... }
-    #
-    #     * arcs: a dict mapping file names to lists of line number pairs::
-    #
-    #         { "file1": [[17,23], [17,25], [25,26]], ... }
-    #
-    #     * file_tracers: a dict mapping file names to plugin names::
-    #
-    #         { "file1": "django.coverage", ... }
-    #
-    #     * runs: a list of dicts of information about the coverage.py runs
-    #       contributing to the data::
-    #
-    #         [ { "brief_sys": "CPython 2.7.10 Darwin" }, ... ]
-    #
-    # Only one of `lines` or `arcs` will be present: with branch coverage, data
-    # is stored as arcs. Without branch coverage, it is stored as lines.  The
-    # line data is easily recovered from the arcs: it is all the first elements
-    # of the pairs that are greater than zero.
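For illustration only, a hypothetical line-mode payload matching the layout described above (in the actual file, the ``_GO_AWAY`` sentinel written by ``write_fileobj`` precedes the JSON) would decode to::

    {
        "lines": {"main.py": [1, 2, 5], "util.py": [3, 7]},
        "runs": [{"note": "an example run"}]
    }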
-
-    def __init__(self, debug=None):
-        """Create a CoverageData.
-
-        `debug` is a `DebugControl` object for writing debug messages.
-
-        """
-        self._debug = debug
-
-        # A map from canonical Python source file name to a dictionary in
-        # which there's an entry for each line number that has been
-        # executed:
-        #
-        #   { 'filename1.py': [12, 47, 1001], ... }
-        #
-        self._lines = None
-
-        # A map from canonical Python source file name to a dictionary with an
-        # entry for each pair of line numbers forming an arc:
-        #
-        #   { 'filename1.py': [(12,14), (47,48), ... ], ... }
-        #
-        self._arcs = None
-
-        # A map from canonical source file name to a plugin module name:
-        #
-        #   { 'filename1.py': 'django.coverage', ... }
-        #
-        self._file_tracers = {}
-
-        # A list of dicts of information about the coverage.py runs.
-        self._runs = []
-
-    def __repr__(self):
-        return "<{klass} lines={lines} arcs={arcs} tracers={tracers} runs={runs}>".format(
-            klass=self.__class__.__name__,
-            lines="None" if self._lines is None else "{{{0}}}".format(len(self._lines)),
-            arcs="None" if self._arcs is None else "{{{0}}}".format(len(self._arcs)),
-            tracers="{{{0}}}".format(len(self._file_tracers)),
-            runs="[{0}]".format(len(self._runs)),
-        )
-
-    ##
-    ## Reading data
-    ##
-
-    def has_arcs(self):
-        """Does this data have arcs?
-
-        Arc data is only available if branch coverage was used during
-        collection.
-
-        Returns a boolean.
-
-        """
-        return self._has_arcs()
-
-    def lines(self, filename):
-        """Get the list of lines executed for a file.
-
-        If the file was not measured, returns None.  A file might be measured,
-        and have no lines executed, in which case an empty list is returned.
-
-        If the file was executed, returns a list of integers, the line numbers
-        executed in the file. The list is in no particular order.
-
-        """
-        if self._arcs is not None:
-            arcs = self._arcs.get(filename)
-            if arcs is not None:
-                all_lines = itertools.chain.from_iterable(arcs)
-                return list(set(l for l in all_lines if l > 0))
-        elif self._lines is not None:
-            return self._lines.get(filename)
-        return None
-
-    def arcs(self, filename):
-        """Get the list of arcs executed for a file.
-
-        If the file was not measured, returns None.  A file might be measured,
-        and have no arcs executed, in which case an empty list is returned.
-
-        If the file was executed, returns a list of 2-tuples of integers. Each
-        pair is a starting line number and an ending line number for a
-        transition from one line to another. The list is in no particular
-        order.
-
-        Negative numbers have special meaning.  If the starting line number is
-        -N, it represents an entry to the code object that starts at line N.
-        If the ending line number is -N, it's an exit from the code object that
-        starts at line N.
-
-        """
-        if self._arcs is not None:
-            if filename in self._arcs:
-                return self._arcs[filename]
-        return None
-
-    def file_tracer(self, filename):
-        """Get the plugin name of the file tracer for a file.
-
-        Returns the name of the plugin that handles this file.  If the file was
-        measured, but didn't use a plugin, then "" is returned.  If the file
-        was not measured, then None is returned.
-
-        """
-        # Because the vast majority of files involve no plugin, we don't store
-        # them explicitly in self._file_tracers.  Check the measured data
-        # instead to see if it was a known file with no plugin.
-        if filename in (self._arcs or self._lines or {}):
-            return self._file_tracers.get(filename, "")
-        return None
-
-    def run_infos(self):
-        """Return the list of dicts of run information.
-
-        For data collected during a single run, this will be a one-element
-        list.  If data has been combined, there will be one element for each
-        original data file.
-
-        """
-        return self._runs
-
-    def measured_files(self):
-        """A list of all files that had been measured."""
-        return list(self._arcs or self._lines or {})
-
-    def line_counts(self, fullpath=False):
-        """Return a dict summarizing the line coverage data.
-
-        Keys are based on the file names, and values are the number of executed
-        lines.  If `fullpath` is true, then the keys are the full pathnames of
-        the files, otherwise they are the basenames of the files.
-
-        Returns a dict mapping file names to counts of lines.
-
-        """
-        summ = {}
-        if fullpath:
-            filename_fn = lambda f: f
-        else:
-            filename_fn = os.path.basename
-        for filename in self.measured_files():
-            summ[filename_fn(filename)] = len(self.lines(filename))
-        return summ
-
-    def __nonzero__(self):
-        return bool(self._lines or self._arcs)
-
-    __bool__ = __nonzero__
-
-    def read_fileobj(self, file_obj):
-        """Read the coverage data from the given file object.
-
-        Should only be used on an empty CoverageData object.
-
-        """
-        data = self._read_raw_data(file_obj)
-
-        self._lines = self._arcs = None
-
-        if 'lines' in data:
-            self._lines = data['lines']
-        if 'arcs' in data:
-            self._arcs = dict(
-                (fname, [tuple(pair) for pair in arcs])
-                for fname, arcs in iitems(data['arcs'])
-            )
-        self._file_tracers = data.get('file_tracers', {})
-        self._runs = data.get('runs', [])
-
-        self._validate()
-
-    def read_file(self, filename):
-        """Read the coverage data from `filename` into this object."""
-        if self._debug and self._debug.should('dataio'):
-            self._debug.write("Reading data from %r" % (filename,))
-        try:
-            with self._open_for_reading(filename) as f:
-                self.read_fileobj(f)
-        except Exception as exc:
-            raise CoverageException(
-                "Couldn't read data from '%s': %s: %s" % (
-                    filename, exc.__class__.__name__, exc,
-                )
-            )
-
-    _GO_AWAY = "!coverage.py: This is a private format, don't read it directly!"
-
-    @classmethod
-    def _open_for_reading(cls, filename):
-        """Open a file appropriately for reading data."""
-        return open(filename, "r")
-
-    @classmethod
-    def _read_raw_data(cls, file_obj):
-        """Read the raw data from a file object."""
-        go_away = file_obj.read(len(cls._GO_AWAY))
-        if go_away != cls._GO_AWAY:
-            raise CoverageException("Doesn't seem to be a coverage.py data file")
-        return json.load(file_obj)
-
-    @classmethod
-    def _read_raw_data_file(cls, filename):
-        """Read the raw data from a file, for debugging."""
-        with cls._open_for_reading(filename) as f:
-            return cls._read_raw_data(f)
-
-    ##
-    ## Writing data
-    ##
-
-    def add_lines(self, line_data):
-        """Add measured line data.
-
-        `line_data` is a dictionary mapping file names to dictionaries::
-
-            { filename: { lineno: None, ... }, ...}
-
-        """
-        if self._debug and self._debug.should('dataop'):
-            self._debug.write("Adding lines: %d files, %d lines total" % (
-                len(line_data), sum(len(lines) for lines in line_data.values())
-            ))
-        if self._has_arcs():
-            raise CoverageException("Can't add lines to existing arc data")
-
-        if self._lines is None:
-            self._lines = {}
-        for filename, linenos in iitems(line_data):
-            if filename in self._lines:
-                new_linenos = set(self._lines[filename])
-                new_linenos.update(linenos)
-                linenos = new_linenos
-            self._lines[filename] = list(linenos)
-
-        self._validate()
-
-    def add_arcs(self, arc_data):
-        """Add measured arc data.
-
-        `arc_data` is a dictionary mapping file names to dictionaries::
-
-            { filename: { (l1,l2): None, ... }, ...}
-
-        """
-        if self._debug and self._debug.should('dataop'):
-            self._debug.write("Adding arcs: %d files, %d arcs total" % (
-                len(arc_data), sum(len(arcs) for arcs in arc_data.values())
-            ))
-        if self._has_lines():
-            raise CoverageException("Can't add arcs to existing line data")
-
-        if self._arcs is None:
-            self._arcs = {}
-        for filename, arcs in iitems(arc_data):
-            if filename in self._arcs:
-                new_arcs = set(self._arcs[filename])
-                new_arcs.update(arcs)
-                arcs = new_arcs
-            self._arcs[filename] = list(arcs)
-
-        self._validate()
-
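A short sketch of the writing API described in the docstrings above; the file name, line numbers, and run note are made up for illustration::

    from coverage.data import CoverageData

    data = CoverageData()
    data.add_lines({"prog.py": {1: None, 2: None, 7: None}})
    data.add_run_info(note="an example run")
    data.write_file(".coverage.example")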
-    def add_file_tracers(self, file_tracers):
-        """Add per-file plugin information.
-
-        `file_tracers` is { filename: plugin_name, ... }
-
-        """
-        if self._debug and self._debug.should('dataop'):
-            self._debug.write("Adding file tracers: %d files" % (len(file_tracers),))
-
-        existing_files = self._arcs or self._lines or {}
-        for filename, plugin_name in iitems(file_tracers):
-            if filename not in existing_files:
-                raise CoverageException(
-                    "Can't add file tracer data for unmeasured file '%s'" % (filename,)
-                )
-            existing_plugin = self._file_tracers.get(filename)
-            if existing_plugin is not None and plugin_name != existing_plugin:
-                raise CoverageException(
-                    "Conflicting file tracer name for '%s': %r vs %r" % (
-                        filename, existing_plugin, plugin_name,
-                    )
-                )
-            self._file_tracers[filename] = plugin_name
-
-        self._validate()
-
-    def add_run_info(self, **kwargs):
-        """Add information about the run.
-
-        Keywords are arbitrary, and are stored in the run dictionary. Values
-        must be JSON serializable.  You may use this function more than once,
-        but repeated keywords overwrite each other.
-
-        """
-        if self._debug and self._debug.should('dataop'):
-            self._debug.write("Adding run info: %r" % (kwargs,))
-        if not self._runs:
-            self._runs = [{}]
-        self._runs[0].update(kwargs)
-        self._validate()
-
-    def touch_file(self, filename):
-        """Ensure that `filename` appears in the data, empty if needed."""
-        if self._debug and self._debug.should('dataop'):
-            self._debug.write("Touching %r" % (filename,))
-        if not self._has_arcs() and not self._has_lines():
-            raise CoverageException("Can't touch files in an empty CoverageData")
-
-        if self._has_arcs():
-            where = self._arcs
-        else:
-            where = self._lines
-        where.setdefault(filename, [])
-
-        self._validate()
-
-    def write_fileobj(self, file_obj):
-        """Write the coverage data to `file_obj`."""
-
-        # Create the file data.
-        file_data = {}
-
-        if self._has_arcs():
-            file_data['arcs'] = self._arcs
-
-        if self._has_lines():
-            file_data['lines'] = self._lines
-
-        if self._file_tracers:
-            file_data['file_tracers'] = self._file_tracers
-
-        if self._runs:
-            file_data['runs'] = self._runs
-
-        # Write the data to the file.
-        file_obj.write(self._GO_AWAY)
-        json.dump(file_data, file_obj)
-
-    def write_file(self, filename):
-        """Write the coverage data to `filename`."""
-        if self._debug and self._debug.should('dataio'):
-            self._debug.write("Writing data to %r" % (filename,))
-        with open(filename, 'w') as fdata:
-            self.write_fileobj(fdata)
-
-    def erase(self):
-        """Erase the data in this object."""
-        self._lines = None
-        self._arcs = None
-        self._file_tracers = {}
-        self._runs = []
-        self._validate()
-
-    def update(self, other_data, aliases=None):
-        """Update this data with data from another `CoverageData`.
-
-        If `aliases` is provided, it's a `PathAliases` object that is used to
-        re-map paths to match the local machine's.
-
-        """
-        if self._has_lines() and other_data._has_arcs():
-            raise CoverageException("Can't combine arc data with line data")
-        if self._has_arcs() and other_data._has_lines():
-            raise CoverageException("Can't combine line data with arc data")
-
-        aliases = aliases or PathAliases()
-
-        # _file_tracers: only have a string, so they have to agree.
-        # Have to do these first, so that our examination of self._arcs and
-        # self._lines won't be confused by data updated from other_data.
-        for filename in other_data.measured_files():
-            other_plugin = other_data.file_tracer(filename)
-            filename = aliases.map(filename)
-            this_plugin = self.file_tracer(filename)
-            if this_plugin is None:
-                if other_plugin:
-                    self._file_tracers[filename] = other_plugin
-            elif this_plugin != other_plugin:
-                raise CoverageException(
-                    "Conflicting file tracer name for '%s': %r vs %r" % (
-                        filename, this_plugin, other_plugin,
-                    )
-                )
-
-        # _runs: add the new runs to these runs.
-        self._runs.extend(other_data._runs)
-
-        # _lines: merge dicts.
-        if other_data._has_lines():
-            if self._lines is None:
-                self._lines = {}
-            for filename, file_lines in iitems(other_data._lines):
-                filename = aliases.map(filename)
-                if filename in self._lines:
-                    lines = set(self._lines[filename])
-                    lines.update(file_lines)
-                    file_lines = list(lines)
-                self._lines[filename] = file_lines
-
-        # _arcs: merge dicts.
-        if other_data._has_arcs():
-            if self._arcs is None:
-                self._arcs = {}
-            for filename, file_arcs in iitems(other_data._arcs):
-                filename = aliases.map(filename)
-                if filename in self._arcs:
-                    arcs = set(self._arcs[filename])
-                    arcs.update(file_arcs)
-                    file_arcs = list(arcs)
-                self._arcs[filename] = file_arcs
-
-        self._validate()
-
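A sketch of combining two collections with ``update``, assuming the ``PathAliases.add(pattern, result)`` interface from ``coverage.files``; the file and path names are hypothetical::

    from coverage.data import CoverageData
    from coverage.files import PathAliases

    combined = CoverageData()
    combined.read_file(".coverage.host1")

    other = CoverageData()
    other.read_file(".coverage.host2")

    aliases = PathAliases()
    aliases.add("/remote/project/", "./")     # re-map remote paths to local ones
    combined.update(other, aliases=aliases)
    combined.write_file(".coverage")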
-    ##
-    ## Miscellaneous
-    ##
-
-    def _validate(self):
-        """If we are in paranoid mode, validate that everything is right."""
-        if env.TESTING:
-            self._validate_invariants()
-
-    def _validate_invariants(self):
-        """Validate internal invariants."""
-        # Only one of _lines or _arcs should exist.
-        assert not(self._has_lines() and self._has_arcs()), (
-            "Shouldn't have both _lines and _arcs"
-        )
-
-        # _lines should be a dict of lists of ints.
-        if self._has_lines():
-            for fname, lines in iitems(self._lines):
-                assert isinstance(fname, string_class), "Key in _lines shouldn't be %r" % (fname,)
-                assert all(isinstance(x, int) for x in lines), (
-                    "_lines[%r] shouldn't be %r" % (fname, lines)
-                )
-
-        # _arcs should be a dict of lists of pairs of ints.
-        if self._has_arcs():
-            for fname, arcs in iitems(self._arcs):
-                assert isinstance(fname, string_class), "Key in _arcs shouldn't be %r" % (fname,)
-                assert all(isinstance(x, int) and isinstance(y, int) for x, y in arcs), (
-                    "_arcs[%r] shouldn't be %r" % (fname, arcs)
-                )
-
-        # _file_tracers should have only non-empty strings as values.
-        for fname, plugin in iitems(self._file_tracers):
-            assert isinstance(fname, string_class), (
-                "Key in _file_tracers shouldn't be %r" % (fname,)
-            )
-            assert plugin and isinstance(plugin, string_class), (
-                "_file_tracers[%r] shoudn't be %r" % (fname, plugin)
-            )
-
-        # _runs should be a list of dicts.
-        for val in self._runs:
-            assert isinstance(val, dict)
-            for key in val:
-                assert isinstance(key, string_class), "Key in _runs shouldn't be %r" % (key,)
-
-    def add_to_hash(self, filename, hasher):
-        """Contribute `filename`'s data to the `hasher`.
-
-        `hasher` is a `coverage.misc.Hasher` instance to be updated with
-        the file's data.  It should only get the results data, not the run
-        data.
-
-        """
-        if self._has_arcs():
-            hasher.update(sorted(self.arcs(filename) or []))
-        else:
-            hasher.update(sorted(self.lines(filename) or []))
-        hasher.update(self.file_tracer(filename))
-
-    ##
-    ## Internal
-    ##
-
-    def _has_lines(self):
-        """Do we have data in self._lines?"""
-        return self._lines is not None
-
-    def _has_arcs(self):
-        """Do we have data in self._arcs?"""
-        return self._arcs is not None
-
-
-class CoverageDataFiles(object):
-    """Manage the use of coverage data files."""
-
-    def __init__(self, basename=None, warn=None):
-        """Create a CoverageDataFiles to manage data files.
-
-        `warn` is the warning function to use.
-
-        `basename` is the name of the file to use for storing data.
-
-        """
-        self.warn = warn
-        # Construct the file name that will be used for data storage.
-        self.filename = os.path.abspath(basename or ".coverage")
-
-    def erase(self, parallel=False):
-        """Erase the data from the file storage.
-
-        If `parallel` is true, then also deletes data files created from the
-        basename by parallel-mode.
-
-        """
-        file_be_gone(self.filename)
-        if parallel:
-            data_dir, local = os.path.split(self.filename)
-            localdot = local + '.*'
-            pattern = os.path.join(os.path.abspath(data_dir), localdot)
-            for filename in glob.glob(pattern):
-                file_be_gone(filename)
-
-    def read(self, data):
-        """Read the coverage data."""
-        if os.path.exists(self.filename):
-            data.read_file(self.filename)
-
-    def write(self, data, suffix=None):
-        """Write the collected coverage data to a file.
-
-        `suffix` is a suffix to append to the base file name. This can be used
-        for multiple or parallel execution, so that many coverage data files
-        can exist simultaneously.  A dot will be used to join the base name and
-        the suffix.
-
-        """
-        filename = self.filename
-        if suffix is True:
-            # If data_suffix was a simple true value, then make a suffix with
-            # plenty of distinguishing information.  We do this here in
-            # `write()` at the last minute so that the pid will be correct even
-            # if the process forks.
-            extra = ""
-            if _TEST_NAME_FILE:                             # pragma: debugging
-                with open(_TEST_NAME_FILE) as f:
-                    test_name = f.read()
-                extra = "." + test_name
-            suffix = "%s%s.%s.%06d" % (
-                socket.gethostname(), extra, os.getpid(),
-                random.randint(0, 999999)
-            )
-
-        if suffix:
-            filename += "." + suffix
-        data.write_file(filename)
-
-    def combine_parallel_data(self, data, aliases=None, data_paths=None):
-        """Combine a number of data files together.
-
-        Treat `self.filename` as a file prefix, and combine the data from all
-        of the data files starting with that prefix plus a dot.
-
-        If `aliases` is provided, it's a `PathAliases` object that is used to
-        re-map paths to match the local machine's.
-
-        If `data_paths` is provided, it is a list of directories or files to
-        combine.  Directories are searched for files that start with
-        `self.filename` plus dot as a prefix, and those files are combined.
-
-        If `data_paths` is not provided, then the directory portion of
-        `self.filename` is used as the directory to search for data files.
-
-        Every data file found and combined is then deleted from disk. If a file
-        cannot be read, a warning will be issued, and the file will not be
-        deleted.
-
-        """
-        # Because of the os.path.abspath in the constructor, data_dir will
-        # never be an empty string.
-        data_dir, local = os.path.split(self.filename)
-        localdot = local + '.*'
-
-        data_paths = data_paths or [data_dir]
-        files_to_combine = []
-        for p in data_paths:
-            if os.path.isfile(p):
-                files_to_combine.append(os.path.abspath(p))
-            elif os.path.isdir(p):
-                pattern = os.path.join(os.path.abspath(p), localdot)
-                files_to_combine.extend(glob.glob(pattern))
-            else:
-                raise CoverageException("Couldn't combine from non-existent path '%s'" % (p,))
-
-        for f in files_to_combine:
-            new_data = CoverageData()
-            try:
-                new_data.read_file(f)
-            except CoverageException as exc:
-                if self.warn:
-                    # The CoverageException has the file name in it, so just
-                    # use the message as the warning.
-                    self.warn(str(exc))
-            else:
-                data.update(new_data, aliases=aliases)
-                file_be_gone(f)
-
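Putting ``CoverageDataFiles`` together with the combining logic above, a minimal sketch (the basename and the absence of a warning function are illustrative choices)::

    from coverage.data import CoverageData, CoverageDataFiles

    data_files = CoverageDataFiles(basename=".coverage")
    combined = CoverageData()
    # Merge every ".coverage.*" file found next to the basename into
    # `combined`; files that are read successfully are deleted afterwards.
    data_files.combine_parallel_data(combined)
    data_files.write(combined)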
-
-def canonicalize_json_data(data):
-    """Canonicalize our JSON data so it can be compared."""
-    for fname, lines in iitems(data.get('lines', {})):
-        data['lines'][fname] = sorted(lines)
-    for fname, arcs in iitems(data.get('arcs', {})):
-        data['arcs'][fname] = sorted(arcs)
-
-
-def pretty_data(data):
-    """Format data as JSON, but as nicely as possible.
-
-    Returns a string.
-
-    """
-    # Start with a basic JSON dump.
-    out = json.dumps(data, indent=4, sort_keys=True)
-    # But pairs of numbers shouldn't be split across lines...
-    out = re.sub(r"\[\s+(-?\d+),\s+(-?\d+)\s+]", r"[\1, \2]", out)
-    # Trailing spaces mess with tests, get rid of them.
-    out = re.sub(r"(?m)\s+$", "", out)
-    return out
-
-
-def debug_main(args):
-    """Dump the raw data from data files.
-
-    Run this as::
-
-        $ python -m coverage.data [FILE]
-
-    """
-    parser = optparse.OptionParser()
-    parser.add_option(
-        "-c", "--canonical", action="store_true",
-        help="Sort data into a canonical order",
-    )
-    options, args = parser.parse_args(args)
-
-    for filename in (args or [".coverage"]):
-        print("--- {0} ------------------------------".format(filename))
-        data = CoverageData._read_raw_data_file(filename)
-        if options.canonical:
-            canonicalize_json_data(data)
-        print(pretty_data(data))
-
-
-if __name__ == '__main__':
-    import sys
-    debug_main(sys.argv[1:])
-
-#
-# eflag: FileType = Python2
--- a/DebugClients/Python/coverage/debug.py	Fri Sep 02 19:08:02 2016 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,109 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
-
-"""Control of and utilities for debugging."""
-
-import inspect
-import os
-import sys
-
-from coverage.misc import isolate_module
-
-os = isolate_module(os)
-
-
-# When debugging, it can be helpful to force some options, especially when
-# debugging the configuration mechanisms you usually use to control debugging!
-# This is a list of forced debugging options.
-FORCED_DEBUG = []
-
-# A hack for debugging testing in sub-processes.
-_TEST_NAME_FILE = ""    # "/tmp/covtest.txt"
-
-
-class DebugControl(object):
-    """Control and output for debugging."""
-
-    def __init__(self, options, output):
-        """Configure the options and output file for debugging."""
-        self.options = options
-        self.output = output
-
-    def __repr__(self):
-        return "<DebugControl options=%r output=%r>" % (self.options, self.output)
-
-    def should(self, option):
-        """Decide whether to output debug information in category `option`."""
-        return (option in self.options or option in FORCED_DEBUG)
-
-    def write(self, msg):
-        """Write a line of debug output."""
-        if self.should('pid'):
-            msg = "pid %5d: %s" % (os.getpid(), msg)
-        self.output.write(msg+"\n")
-        if self.should('callers'):
-            dump_stack_frames(out=self.output)
-        self.output.flush()
-
-    def write_formatted_info(self, header, info):
-        """Write a sequence of (label,data) pairs nicely."""
-        self.write(info_header(header))
-        for line in info_formatter(info):
-            self.write(" %s" % line)
-
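A sketch of wiring up ``DebugControl``; the option names are categories used elsewhere in this package ('dataio', 'pid'), and the output stream is chosen for illustration::

    import sys
    from coverage.debug import DebugControl

    debug = DebugControl(options=["dataio", "pid"], output=sys.stderr)
    if debug.should("dataio"):
        debug.write("Reading data from '.coverage'")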
-
-def info_header(label):
-    """Make a nice header string."""
-    return "--{0:-<60s}".format(" "+label+" ")
-
-
-def info_formatter(info):
-    """Produce a sequence of formatted lines from info.
-
-    `info` is a sequence of pairs (label, data).  The produced lines are
-    nicely formatted, ready to print.
-
-    """
-    info = list(info)
-    if not info:
-        return
-    label_len = max(len(l) for l, _d in info)
-    for label, data in info:
-        if data == []:
-            data = "-none-"
-        if isinstance(data, (list, set, tuple)):
-            prefix = "%*s:" % (label_len, label)
-            for e in data:
-                yield "%*s %s" % (label_len+1, prefix, e)
-                prefix = ""
-        else:
-            yield "%*s: %s" % (label_len, label, data)
-
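For example, ``write_formatted_info`` feeds label/data pairs like the following through ``info_header`` and ``info_formatter``; the values are invented::

    from coverage.debug import info_header, info_formatter

    info = [
        ("version", "4.1"),
        ("data_path", "/tmp/.coverage"),
        ("plugins", ["django.coverage", "another.plugin"]),
    ]
    print(info_header("example"))
    for line in info_formatter(info):
        print(" %s" % line)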
-
-def short_stack(limit=None):                                # pragma: debugging
-    """Return a string summarizing the call stack.
-
-    The string is multi-line, with one line per stack frame. Each line shows
-    the function name, the file name, and the line number:
-
-        ...
-        start_import_stop : /Users/ned/coverage/trunk/tests/coveragetest.py @95
-        import_local_file : /Users/ned/coverage/trunk/tests/coveragetest.py @81
-        import_local_file : /Users/ned/coverage/trunk/coverage/backward.py @159
-        ...
-
-    `limit` is the number of frames to include, defaulting to all of them.
-
-    """
-    stack = inspect.stack()[limit:0:-1]
-    return "\n".join("%30s : %s @%d" % (t[3], t[1], t[2]) for t in stack)
-
-
-def dump_stack_frames(limit=None, out=None):                # pragma: debugging
-    """Print a summary of the stack to stdout, or some place else."""
-    out = out or sys.stdout
-    out.write(short_stack(limit=limit))
-    out.write("\n")
-
-#
-# eflag: FileType = Python2
--- a/DebugClients/Python/coverage/doc/AUTHORS.txt	Fri Sep 02 19:08:02 2016 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,73 +0,0 @@
-Coverage.py was originally written by Gareth Rees, and since 2004 has been
-extended and maintained by Ned Batchelder.
-
-Other contributions have been made by:
-
-Adi Roiban
-Alex Gaynor
-Alexander Todorov
-Anthony Sottile
-Arcadiy Ivanov
-Ben Finney
-Bill Hart
-Brandon Rhodes
-Brett Cannon
-Buck Evan
-Carl Gieringer
-Catherine Proulx
-Chris Adams
-Chris Rose
-Christian Heimes
-Christine Lytwynec
-Christoph Zwerschke
-Conrad Ho
-Danek Duvall
-Danny Allen
-David Christian
-David Stanek
-Detlev Offenbach
-Devin Jeanpierre
-Dmitry Shishov
-Dmitry Trofimov
-Eduardo Schettino
-Edward Loper
-Geoff Bache
-George Paci
-George Song
-Greg Rogers
-Guillaume Chazarain
-Ilia Meerovich
-Imri Goldberg
-Ionel Cristian Mărieș
-JT Olds
-Jessamyn Smith
-Jon Chappell
-Joseph Tate
-Julian Berman
-Krystian Kichewko
-Leonardo Pistone
-Lex Berezhny
-Marc Abramowitz
-Marcus Cobden
-Mark van der Wal
-Martin Fuzzey
-Matthew Desmarais
-Max Linke
-Mickie Betz
-Noel O'Boyle
-Pablo Carballo
-Patrick Mezard
-Peter Portante
-Rodrigue Cloutier
-Roger Hu
-Ross Lawley
-Sandra Martocchia
-Sigve Tjora
-Stan Hu
-Stefan Behnel
-Steve Leonard
-Steve Peak
-Ted Wexler
-Titus Brown
-Yury Selivanov
-Zooko Wilcox-O'Hearn
--- a/DebugClients/Python/coverage/doc/CHANGES.rst	Fri Sep 02 19:08:02 2016 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1654 +0,0 @@
-.. Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-.. For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
-
-==============================
-Change history for Coverage.py
-==============================
-
-
-Version 4.1 --- 2016-05-21
---------------------------
-
-- The internal attribute `Reporter.file_reporters` was removed in 4.1b3.  It
-  should have come as no surprise that there were third-party tools out there
-  using that attribute.  It has been restored, but with a deprecation warning.
-
-
-Version 4.1b3 --- 2016-05-10
-----------------------------
-
-- When running your program, execution can jump from an ``except X:`` line to
-  some other line when an exception other than ``X`` happens.  This jump is no
-  longer considered a branch when measuring branch coverage.
-
-- When measuring branch coverage, ``yield`` statements that were never resumed
-  were incorrectly marked as missing, as reported in `issue 440`_.  This is now
-  fixed.
-
-- During branch coverage of single-line callables like lambdas and generator
-  expressions, coverage.py can now distinguish between them never being called,
-  or being called but not completed.  Fixes `issue 90`_, `issue 460`_ and
-  `issue 475`_.
-
-- The HTML report now has a map of the file along the rightmost edge of the
-  page, giving an overview of where the missed lines are.  Thanks, Dmitry
-  Shishov.
-
-- The HTML report now uses different monospaced fonts, favoring Consolas over
-  Courier.  Along the way, `issue 472`_ about not properly handling one-space
-  indents was fixed.  The index page also has slightly different styling, to
-  try to make the clickable detail pages more apparent.
-
-- Missing branches reported with ``coverage report -m`` will now say ``->exit``
-  for missed branches to the exit of a function, rather than a negative number.
-  Fixes `issue 469`_.
-
-- ``coverage --help`` and ``coverage --version`` now mention which tracer is
-  installed, to help diagnose problems. The docs mention which features need
-  the C extension. (`issue 479`_)
-
-- Officially support PyPy 5.1, which required no changes, just updates to the
-  docs.
-
-- The `Coverage.report` function had two parameters with non-None defaults,
-  which have been changed.  `show_missing` used to default to True, but now
-  defaults to None.  If you had been calling `Coverage.report` without
-  specifying `show_missing`, you'll need to explicitly set it to True to keep
-  the same behavior.  `skip_covered` used to default to False. It is now None,
-  which doesn't change the behavior.  This fixes `issue 485`_.
-
-- It's never been possible to pass a namespace module to one of the analysis
-  functions, but now at least we raise a more specific error message, rather
-  than getting confused. (`issue 456`_)
-
-- The `coverage.process_startup` function now returns the `Coverage` instance
-  it creates, as suggested in `issue 481`_.
-
-- Make a small tweak to how we compare threads, to avoid buggy custom
-  comparison code in thread classes. (`issue 245`_)
-
-.. _issue 90: https://bitbucket.org/ned/coveragepy/issues/90/lambda-expression-confuses-branch
-.. _issue 245: https://bitbucket.org/ned/coveragepy/issues/245/change-solution-for-issue-164
-.. _issue 440: https://bitbucket.org/ned/coveragepy/issues/440/yielded-twisted-failure-marked-as-missed
-.. _issue 456: https://bitbucket.org/ned/coveragepy/issues/456/coverage-breaks-with-implicit-namespaces
-.. _issue 460: https://bitbucket.org/ned/coveragepy/issues/460/confusing-html-report-for-certain-partial
-.. _issue 469: https://bitbucket.org/ned/coveragepy/issues/469/strange-1-line-number-in-branch-coverage
-.. _issue 472: https://bitbucket.org/ned/coveragepy/issues/472/html-report-indents-incorrectly-for-one
-.. _issue 475: https://bitbucket.org/ned/coveragepy/issues/475/generator-expression-is-marked-as-not
-.. _issue 479: https://bitbucket.org/ned/coveragepy/issues/479/clarify-the-need-for-the-c-extension
-.. _issue 481: https://bitbucket.org/ned/coveragepy/issues/481/asyncioprocesspoolexecutor-tracing-not
-.. _issue 485: https://bitbucket.org/ned/coveragepy/issues/485/coveragereport-ignores-show_missing-and
-
-
-Version 4.1b2 --- 2016-01-23
-----------------------------
-
-- Problems with the new branch measurement in 4.1 beta 1 were fixed:
-
-  - Class docstrings were considered executable.  Now they no longer are.
-
-  - ``yield from`` and ``await`` were considered returns from functions, since
-    they could transfer control to the caller.  This produced unhelpful "missing
-    branch" reports in a number of circumstances.  Now they no longer are
-    considered returns.
-
-  - In unusual situations, a missing branch to a negative number was reported.
-    This has been fixed, closing `issue 466`_.
-
-- The XML report now produces correct package names for modules found in
-  directories specified with ``source=``.  Fixes `issue 465`_.
-
-- ``coverage report`` won't produce trailing whitespace.
-
-.. _issue 465: https://bitbucket.org/ned/coveragepy/issues/465/coveragexml-produces-package-names-with-an
-.. _issue 466: https://bitbucket.org/ned/coveragepy/issues/466/impossible-missed-branch-to-a-negative
-
-
-Version 4.1b1 --- 2016-01-10
-----------------------------
-
-- Branch analysis has been rewritten: it used to be based on bytecode, but now
-  uses AST analysis.  This has changed a number of things:
-
-  - More code paths are now considered runnable, especially in
-    ``try``/``except`` structures.  This may mean that coverage.py will
-    identify more code paths as uncovered.  This could either raise or lower
-    your overall coverage number.
-
-  - Python 3.5's ``async`` and ``await`` keywords are properly supported,
-    fixing `issue 434`_.
-
-  - Some long-standing branch coverage bugs were fixed:
-
-    - `issue 129`_: functions with only a docstring for a body would
-      incorrectly report a missing branch on the ``def`` line.
-
-    - `issue 212`_: code in an ``except`` block could be incorrectly marked as
-      a missing branch.
-
-    - `issue 146`_: context managers (``with`` statements) in a loop or ``try``
-      block could confuse the branch measurement, reporting incorrect partial
-      branches.
-
-    - `issue 422`_: in Python 3.5, an actual partial branch could be marked as
-      complete.
-
-- Pragmas to disable coverage measurement can now be used on decorator lines,
-  and they will apply to the entire function or class being decorated.  This
-  implements the feature requested in `issue 131`_.
-
-- Multiprocessing support is now available on Windows.  Thanks, Rodrigue
-  Cloutier.
-
-- Files with two encoding declarations are properly supported, fixing
-  `issue 453`_. Thanks, Max Linke.
-
-- Non-ASCII characters in regexes in the configuration file worked in 3.7, but
-  stopped working in 4.0.  Now they work again, closing `issue 455`_.
-
-- Form-feed characters would prevent accurate determination of the beginning of
-  statements in the rest of the file.  This is now fixed, closing `issue 461`_.
-
-.. _issue 129: https://bitbucket.org/ned/coveragepy/issues/129/misleading-branch-coverage-of-empty
-.. _issue 131: https://bitbucket.org/ned/coveragepy/issues/131/pragma-on-a-decorator-line-should-affect
-.. _issue 146: https://bitbucket.org/ned/coveragepy/issues/146/context-managers-confuse-branch-coverage
-.. _issue 212: https://bitbucket.org/ned/coveragepy/issues/212/coverage-erroneously-reports-partial
-.. _issue 422: https://bitbucket.org/ned/coveragepy/issues/422/python35-partial-branch-marked-as-fully
-.. _issue 434: https://bitbucket.org/ned/coveragepy/issues/434/indexerror-in-python-35
-.. _issue 453: https://bitbucket.org/ned/coveragepy/issues/453/source-code-encoding-can-only-be-specified
-.. _issue 455: https://bitbucket.org/ned/coveragepy/issues/455/unusual-exclusions-stopped-working-in
-.. _issue 461: https://bitbucket.org/ned/coveragepy/issues/461/multiline-asserts-need-too-many-pragma
-
-
-Version 4.0.3 --- 2015-11-24
-----------------------------
-
-- Fixed a mysterious problem that manifested in different ways: sometimes
-  hanging the process (`issue 420`_), sometimes making database connections
-  fail (`issue 445`_).
-
-- The XML report now has correct ``<source>`` elements when using a
-  ``--source=`` option somewhere besides the current directory.  This fixes
-  `issue 439`_. Thanks, Arcady Ivanov.
-
-- Fixed an unusual edge case of detecting source encodings, described in
-  `issue 443`_.
-
-- Help messages that mention the command to use now properly use the actual
-  command name, which might be different than "coverage".  Thanks to Ben
-  Finney, this closes `issue 438`_.
-
-.. _issue 420: https://bitbucket.org/ned/coveragepy/issues/420/coverage-40-hangs-indefinitely-on-python27
-.. _issue 438: https://bitbucket.org/ned/coveragepy/issues/438/parameterise-coverage-command-name
-.. _issue 439: https://bitbucket.org/ned/coveragepy/issues/439/incorrect-cobertura-file-sources-generated
-.. _issue 443: https://bitbucket.org/ned/coveragepy/issues/443/coverage-gets-confused-when-encoding
-.. _issue 445: https://bitbucket.org/ned/coveragepy/issues/445/django-app-cannot-connect-to-cassandra
-
-
-Version 4.0.2 --- 2015-11-04
-----------------------------
-
-- More work on supporting unusually encoded source. Fixed `issue 431`_.
-
-- Files or directories with non-ASCII characters are now handled properly,
-  fixing `issue 432`_.
-
-- Setting a trace function with sys.settrace was broken by a change in 4.0.1,
-  as reported in `issue 436`_.  This is now fixed.
-
-- Officially support PyPy 4.0, which required no changes, just updates to the
-  docs.
-
-.. _issue 431: https://bitbucket.org/ned/coveragepy/issues/431/couldnt-parse-python-file-with-cp1252
-.. _issue 432: https://bitbucket.org/ned/coveragepy/issues/432/path-with-unicode-characters-various
-.. _issue 436: https://bitbucket.org/ned/coveragepy/issues/436/disabled-coverage-ctracer-may-rise-from
-
-
-Version 4.0.1 --- 2015-10-13
-----------------------------
-
-- When combining data files, unreadable files will now generate a warning
-  instead of failing the command.  This is more in line with the older
-  coverage.py v3.7.1 behavior, which silently ignored unreadable files.
-  Prompted by `issue 418`_.
-
-- The --skip-covered option would skip reporting on 100% covered files, but
-  also skipped them when calculating total coverage.  This was wrong, it should
-  only remove lines from the report, not change the final answer.  This is now
-  fixed, closing `issue 423`_.
-
-- In 4.0, the data file recorded a summary of the system on which it was run.
-  Combined data files would keep all of those summaries.  This could lead to
-  enormous data files consisting of mostly repetitive useless information. That
-  summary is now gone, fixing `issue 415`_.  If you want summary information,
-  get in touch, and we'll figure out a better way to do it.
-
-- Test suites that mocked os.path.exists would experience strange failures, due
-  to coverage.py using their mock inadvertently.  This is now fixed, closing
-  `issue 416`_.
-
-- Importing a ``__init__`` module explicitly would lead to an error:
-  ``AttributeError: 'module' object has no attribute '__path__'``, as reported
-  in `issue 410`_.  This is now fixed.
-
-- Code that uses ``sys.settrace(sys.gettrace())`` used to incur a more than 2x
-  speed penalty.  Now there's no penalty at all. Fixes `issue 397`_.
-
-- Pyexpat C code will no longer be recorded as a source file, fixing
-  `issue 419`_.
-
-- The source kit now contains all of the files needed to have a complete source
-  tree, re-fixing `issue 137`_ and closing `issue 281`_.
-
-.. _issue 281: https://bitbucket.org/ned/coveragepy/issues/281/supply-scripts-for-testing-in-the
-.. _issue 397: https://bitbucket.org/ned/coveragepy/issues/397/stopping-and-resuming-coverage-with
-.. _issue 410: https://bitbucket.org/ned/coveragepy/issues/410/attributeerror-module-object-has-no
-.. _issue 415: https://bitbucket.org/ned/coveragepy/issues/415/repeated-coveragedataupdates-cause
-.. _issue 416: https://bitbucket.org/ned/coveragepy/issues/416/mocking-ospathexists-causes-failures
-.. _issue 418: https://bitbucket.org/ned/coveragepy/issues/418/json-parse-error
-.. _issue 419: https://bitbucket.org/ned/coveragepy/issues/419/nosource-no-source-for-code-path-to-c
-.. _issue 423: https://bitbucket.org/ned/coveragepy/issues/423/skip_covered-changes-reported-total
-
-
-Version 4.0 --- 2015-09-20
---------------------------
-
-No changes from 4.0b3
-
-
-Version 4.0b3 --- 2015-09-07
-----------------------------
-
-- Reporting on an unmeasured file would fail with a traceback.  This is now
-  fixed, closing `issue 403`_.
-
-- The Jenkins ShiningPanda plugin looks for an obsolete file name to find the
-  HTML reports to publish, so it was failing under coverage.py 4.0.  Now we
-  create that file if we are running under Jenkins, to keep things working
-  smoothly. `issue 404`_.
-
-- Kits used to include tests and docs, but didn't install them anywhere, or
-  provide all of the supporting tools to make them useful.  Kits no longer
-  include tests and docs.  If you were using them from the older packages, get
-  in touch and help me understand how.
-
-.. _issue 403: https://bitbucket.org/ned/coveragepy/issues/403/hasherupdate-fails-with-typeerror-nonetype
-.. _issue 404: https://bitbucket.org/ned/coveragepy/issues/404/shiningpanda-jenkins-plugin-cant-find-html
-
-
-
-Version 4.0b2 --- 2015-08-22
-----------------------------
-
-- 4.0b1 broke ``--append`` creating new data files.  This is now fixed, closing
-  `issue 392`_.
-
-- ``py.test --cov`` can write empty data, then touch files due to ``--source``,
-  which made coverage.py mistakenly force the data file to record lines instead
-  of arcs.  This would lead to a "Can't combine line data with arc data" error
-  message.  This is now fixed, and changed some method names in the
-  CoverageData interface.  Fixes `issue 399`_.
-
-- `CoverageData.read_fileobj` and `CoverageData.write_fileobj` replace the
-  `.read` and `.write` methods, and are now properly inverses of each other.
-
-- When using ``report --skip-covered``, a message will now be included in the
-  report output indicating how many files were skipped, and if all files are
-  skipped, coverage.py won't accidentally scold you for having no data to
-  report.  Thanks, Krystian Kichewko.
-
-- A new conversion utility has been added:  ``python -m coverage.pickle2json``
-  will convert v3.x pickle data files to v4.x JSON data files.  Thanks,
-  Alexander Todorov.  Closes `issue 395`_.
-
-- A new version identifier is available, `coverage.version_info`, a plain tuple
-  of values similar to `sys.version_info`_.
-
-.. _issue 392: https://bitbucket.org/ned/coveragepy/issues/392/run-append-doesnt-create-coverage-file
-.. _issue 395: https://bitbucket.org/ned/coveragepy/issues/395/rfe-read-pickled-files-as-well-for
-.. _issue 399: https://bitbucket.org/ned/coveragepy/issues/399/coverageexception-cant-combine-line-data
-.. _sys.version_info: https://docs.python.org/3/library/sys.html#sys.version_info
-
-
-Version 4.0b1 --- 2015-08-02
-----------------------------
-
-- Coverage.py is now licensed under the Apache 2.0 license.  See NOTICE.txt for
-  details.  Closes `issue 313`_.
-
-- The data storage has been completely revamped.  The data file is now
-  JSON-based instead of a pickle, closing `issue 236`_.  The `CoverageData`
-  class is now a public supported documented API to the data file.
-
-- A new configuration option, ``[run] note``, lets you set a note that will be
-  stored in the `runs` section of the data file.  You can use this to annotate
-  the data file with any information you like.
-
-- Unrecognized configuration options will now print an error message and stop
-  coverage.py.  This should help prevent configuration mistakes from passing
-  silently.  Finishes `issue 386`_.
-
-- In parallel mode, ``coverage erase`` will now delete all of the data files,
-  fixing `issue 262`_.
-
-- Coverage.py now accepts a directory name for ``coverage run`` and will run a
-  ``__main__.py`` found there, just like Python will.  Fixes `issue 252`_.
-  Thanks, Dmitry Trofimov.
-
-- The XML report now includes a ``missing-branches`` attribute.  Thanks, Steve
-  Peak.  This is not a part of the Cobertura DTD, so the XML report no longer
-  references the DTD.
-
-- Missing branches in the HTML report now have a bit more information in the
-  right-hand annotations.  Hopefully this will make their meaning clearer.
-
-- All the reporting functions now behave the same if no data had been
-  collected, exiting with a status code of 1.  Fixed ``fail_under`` to be
-  applied even when the report is empty.  Thanks, Ionel Cristian Mărieș.
-
-- Plugins are now initialized differently.  Instead of looking for a class
-  called ``Plugin``, coverage.py looks for a function called ``coverage_init``.
-
-- A file-tracing plugin can now ask to have built-in Python reporting by
-  returning `"python"` from its `file_reporter()` method.
-
-- Code that was executed with `exec` would be mis-attributed to the file that
-  called it.  This is now fixed, closing `issue 380`_.
-
-- The ability to use item access on `Coverage.config` (introduced in 4.0a2) has
-  been changed to a more explicit `Coverage.get_option` and
-  `Coverage.set_option` API.
-
-- The ``Coverage.use_cache`` method is no longer supported.
-
-- The private method ``Coverage._harvest_data`` is now called
-  ``Coverage.get_data``, and returns the ``CoverageData`` containing the
-  collected data.
-
-- The project is consistently referred to as "coverage.py" throughout the code
-  and the documentation, closing `issue 275`_.
-
-- Combining data files with an explicit configuration file was broken in 4.0a6,
-  but now works again, closing `issue 385`_.
-
-- ``coverage combine`` now accepts files as well as directories.
-
-- The speed is back to 3.7.1 levels, after having slowed down due to plugin
-  support, finishing up `issue 387`_.
-
-.. _issue 236: https://bitbucket.org/ned/coveragepy/issues/236/pickles-are-bad-and-you-should-feel-bad
-.. _issue 252: https://bitbucket.org/ned/coveragepy/issues/252/coverage-wont-run-a-program-with
-.. _issue 262: https://bitbucket.org/ned/coveragepy/issues/262/when-parallel-true-erase-should-erase-all
-.. _issue 275: https://bitbucket.org/ned/coveragepy/issues/275/refer-consistently-to-project-as-coverage
-.. _issue 313: https://bitbucket.org/ned/coveragepy/issues/313/add-license-file-containing-2-3-or-4
-.. _issue 380: https://bitbucket.org/ned/coveragepy/issues/380/code-executed-by-exec-excluded-from
-.. _issue 385: https://bitbucket.org/ned/coveragepy/issues/385/coverage-combine-doesnt-work-with-rcfile
-.. _issue 386: https://bitbucket.org/ned/coveragepy/issues/386/error-on-unrecognised-configuration
-.. _issue 387: https://bitbucket.org/ned/coveragepy/issues/387/performance-degradation-from-371-to-40
-
-.. 40 issues closed in 4.0 below here
-
-
-Version 4.0a6 --- 2015-06-21
-----------------------------
-
-- Python 3.5b2 and PyPy 2.6.0 are supported.
-
-- The original module-level function interface to coverage.py is no longer
-  supported.  You must now create a ``coverage.Coverage`` object, and use
-  methods on it.
-
-- The ``coverage combine`` command now accepts any number of directories as
-  arguments, and will combine all the data files from those directories.  This
-  means you don't have to copy the files to one directory before combining.
-  Thanks, Christine Lytwynec.  Finishes `issue 354`_.
-
-- Branch coverage couldn't properly handle certain extremely long files. This
-  is now fixed (`issue 359`_).
-
-- Branch coverage didn't understand yield statements properly.  Mickie Betz
-  persisted in pursuing this despite Ned's pessimism.  Fixes `issue 308`_ and
-  `issue 324`_.
-
-- The COVERAGE_DEBUG environment variable can be used to set the ``[run] debug``
-  configuration option to control what internal operations are logged.
-
-- HTML reports were truncated at formfeed characters.  This is now fixed
-  (`issue 360`_).  It's always fun when the problem is due to a `bug in the
-  Python standard library <http://bugs.python.org/issue19035>`_.
-
-- Files with incorrect encoding declaration comments are no longer ignored by
-  the reporting commands, fixing `issue 351`_.
-
-- HTML reports now include a timestamp in the footer, closing `issue 299`_.
-  Thanks, Conrad Ho.
-
-- HTML reports now begrudgingly use double-quotes rather than single quotes,
-  because there are "software engineers" out there writing tools that read HTML
-  and somehow have no idea that single quotes exist.  Capitulates to the absurd
-  `issue 361`_.  Thanks, Jon Chappell.
-
-- The ``coverage annotate`` command now handles non-ASCII characters properly,
-  closing `issue 363`_.  Thanks, Leonardo Pistone.
-
-- Drive letters on Windows were not normalized correctly, now they are. Thanks,
-  Ionel Cristian Mărieș.
-
-- Plugin support had some bugs fixed, closing `issue 374`_ and `issue 375`_.
-  Thanks, Stefan Behnel.
-
-.. _issue 299: https://bitbucket.org/ned/coveragepy/issue/299/inserted-created-on-yyyy-mm-dd-hh-mm-in
-.. _issue 308: https://bitbucket.org/ned/coveragepy/issue/308/yield-lambda-branch-coverage
-.. _issue 324: https://bitbucket.org/ned/coveragepy/issue/324/yield-in-loop-confuses-branch-coverage
-.. _issue 351: https://bitbucket.org/ned/coveragepy/issue/351/files-with-incorrect-encoding-are-ignored
-.. _issue 354: https://bitbucket.org/ned/coveragepy/issue/354/coverage-combine-should-take-a-list-of
-.. _issue 359: https://bitbucket.org/ned/coveragepy/issue/359/xml-report-chunk-error
-.. _issue 360: https://bitbucket.org/ned/coveragepy/issue/360/html-reports-get-confused-by-l-in-the-code
-.. _issue 361: https://bitbucket.org/ned/coveragepy/issue/361/use-double-quotes-in-html-output-to
-.. _issue 363: https://bitbucket.org/ned/coveragepy/issue/363/annotate-command-hits-unicode-happy-fun
-.. _issue 374: https://bitbucket.org/ned/coveragepy/issue/374/c-tracer-lookups-fail-in
-.. _issue 375: https://bitbucket.org/ned/coveragepy/issue/375/ctracer_handle_return-reads-byte-code
-
-
-Version 4.0a5 --- 2015-02-16
-----------------------------
-
-- Plugin support is now implemented in the C tracer instead of the Python
-  tracer. This greatly improves the speed of tracing projects using plugins.
-
-- Coverage.py now always adds the current directory to sys.path, so that
-  plugins can import files in the current directory (`issue 358`_).
-
-- If the `config_file` argument to the Coverage constructor is specified as
-  ".coveragerc", it is treated as if it were True.  This means setup.cfg is
-  also examined, and a missing file is not considered an error (`issue 357`_).
-
-- Wildly experimental: support for measuring processes started by the
-  multiprocessing module.  To use, set ``--concurrency=multiprocessing``,
-  either on the command line or in the .coveragerc file (`issue 117`_). Thanks,
-  Eduardo Schettino.  Currently, this does not work on Windows.
-
-- A new warning is possible, if a desired file isn't measured because it was
-  imported before coverage.py was started (`issue 353`_).
-
-- The `coverage.process_startup` function now will start coverage measurement
-  only once, no matter how many times it is called.  This fixes problems due
-  to unusual virtualenv configurations (`issue 340`_).
-
-- Added 3.5.0a1 to the list of supported CPython versions.
-
-.. _issue 117: https://bitbucket.org/ned/coveragepy/issue/117/enable-coverage-measurement-of-code-run-by
-.. _issue 340: https://bitbucket.org/ned/coveragepy/issue/340/keyerror-subpy
-.. _issue 353: https://bitbucket.org/ned/coveragepy/issue/353/40a3-introduces-an-unexpected-third-case
-.. _issue 357: https://bitbucket.org/ned/coveragepy/issue/357/behavior-changed-when-coveragerc-is
-.. _issue 358: https://bitbucket.org/ned/coveragepy/issue/358/all-coverage-commands-should-adjust
-
-
-Version 4.0a4 --- 2015-01-25
-----------------------------
-
-- Plugins can now provide sys_info for debugging output.
-
-- Started plugins documentation.
-
-- Prepared to move the docs to readthedocs.org.
-
-
-Version 4.0a3 --- 2015-01-20
-----------------------------
-
-- Reports now use file names with extensions.  Previously, a report would
-  describe a/b/c.py as "a/b/c".  Now it is shown as "a/b/c.py".  This allows
-  for better support of non-Python files, and also fixed `issue 69`_.
-
-- The XML report now reports each directory as a package again.  This was a bad
-  regression, I apologize.  This was reported in `issue 235`_, which is now
-  fixed.
-
-- A new configuration option for the XML report: ``[xml] package_depth``
-  controls which directories are identified as packages in the report.
-  Directories deeper than this depth are not reported as packages.
-  The default is that all directories are reported as packages.
-  Thanks, Lex Berezhny.
-
-- When looking for the source for a frame, check if the file exists. On
-  Windows, .pyw files are no longer recorded as .py files. Along the way, this
-  fixed `issue 290`_.
-
-- Empty files are now reported as 100% covered in the XML report, not 0%
-  covered (`issue 345`_).
-
-- Regexes in the configuration file are now compiled as soon as they are read,
-  to provide error messages earlier (`issue 349`_).
-
-.. _issue 69: https://bitbucket.org/ned/coveragepy/issue/69/coverage-html-overwrite-files-that-doesnt
-.. _issue 235: https://bitbucket.org/ned/coveragepy/issue/235/package-name-is-missing-in-xml-report
-.. _issue 290: https://bitbucket.org/ned/coveragepy/issue/290/running-programmatically-with-pyw-files
-.. _issue 345: https://bitbucket.org/ned/coveragepy/issue/345/xml-reports-line-rate-0-for-empty-files
-.. _issue 349: https://bitbucket.org/ned/coveragepy/issue/349/bad-regex-in-config-should-get-an-earlier
-
-
-Version 4.0a2 --- 2015-01-14
-----------------------------
-
-- Officially support PyPy 2.4, and PyPy3 2.4.  Drop support for
-  CPython 3.2 and older versions of PyPy.  The code won't work on CPython 3.2.
-  It will probably still work on older versions of PyPy, but I'm not testing
-  against them.
-
-- Plugins!
-
-- The original command line switches (`-x` to run a program, etc) are no
-  longer supported.
-
-- A new option: `coverage report --skip-covered` will reduce the number of
-  files reported by skipping files with 100% coverage.  Thanks, Krystian
-  Kichewko.  This means that empty `__init__.py` files will be skipped, since
-  they are 100% covered, closing `issue 315`_.
-
-- You can now specify the ``--fail-under`` option in the ``.coveragerc`` file
-  as the ``[report] fail_under`` option.  This closes `issue 314`_.
-
-- The ``COVERAGE_OPTIONS`` environment variable is no longer supported.  It was
-  a hack for ``--timid`` before configuration files were available.
-
-- The HTML report now has filtering.  Type text into the Filter box on the
-  index page, and only modules with that text in the name will be shown.
-  Thanks, Danny Allen.
-
-- The textual report and the HTML report used to report partial branches
-  differently for no good reason.  Now the text report's "missing branches"
-  column is a "partial branches" column so that both reports show the same
-  numbers.  This closes `issue 342`_.
-
-- If you specify a ``--rcfile`` that cannot be read, you will get an error
-  message.  Fixes `issue 343`_.
-
-- The ``--debug`` switch can now be used on any command.
-
-- You can now programmatically adjust the configuration of coverage.py by
-  setting items on `Coverage.config` after construction.  A short sketch
-  appears at the end of this section.
-
-- A module run with ``-m`` can be used as the argument to ``--source``, fixing
-  `issue 328`_.  Thanks, Buck Evan.
-
-- The regex for matching exclusion pragmas has been fixed to allow more kinds
-  of whitespace, fixing `issue 334`_.
-
-- Made some PyPy-specific tweaks to improve speed under PyPy.  Thanks, Alex
-  Gaynor.
-
-- In some cases, with a source file missing a final newline, coverage.py would
-  count statements incorrectly.  This is now fixed, closing `issue 293`_.
-
-- The status.dat file that HTML reports use to avoid re-creating files that
-  haven't changed is now a JSON file instead of a pickle file.  This obviates
-  `issue 287`_ and `issue 237`_.
-
-.. _issue 237: https://bitbucket.org/ned/coveragepy/issue/237/htmlcov-with-corrupt-statusdat
-.. _issue 287: https://bitbucket.org/ned/coveragepy/issue/287/htmlpy-doesnt-specify-pickle-protocol
-.. _issue 293: https://bitbucket.org/ned/coveragepy/issue/293/number-of-statement-detection-wrong-if-no
-.. _issue 314: https://bitbucket.org/ned/coveragepy/issue/314/fail_under-param-not-working-in-coveragerc
-.. _issue 315: https://bitbucket.org/ned/coveragepy/issue/315/option-to-omit-empty-files-eg-__init__py
-.. _issue 328: https://bitbucket.org/ned/coveragepy/issue/328/misbehavior-in-run-source
-.. _issue 334: https://bitbucket.org/ned/coveragepy/issue/334/pragma-not-recognized-if-tab-character
-.. _issue 342: https://bitbucket.org/ned/coveragepy/issue/342/console-and-html-coverage-reports-differ
-.. _issue 343: https://bitbucket.org/ned/coveragepy/issue/343/an-explicitly-named-non-existent-config
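-
-A minimal sketch of the programmatic configuration mentioned above (that
-items can be set on `Coverage.config` is taken from the entry itself; the
-``"section:option"`` key spelling used here is an assumption)::
-
-    import coverage
-
-    cov = coverage.Coverage()
-    # Adjust configuration after construction instead of editing .coveragerc.
-    cov.config["run:branch"] = True           # assumed key format
-    cov.config["report:show_missing"] = True  # assumed key format
-    cov.start()
-    # ... run the code to be measured ...
-    cov.stop()
-    cov.report()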
-
-
-Version 4.0a1 --- 2014-09-27
-----------------------------
-
-- Python versions supported are now CPython 2.6, 2.7, 3.2, 3.3, and 3.4, and
-  PyPy 2.2.
-
-- Gevent, eventlet, and greenlet are now supported, closing `issue 149`_.
-  The ``concurrency`` setting specifies the concurrency library in use.  Huge
-  thanks to Peter Portante for initial implementation, and to Joe Jevnik for
-  the final insight that completed the work.
-
-- Options are now also read from a setup.cfg file, if any.  Sections are
-  prefixed with "coverage:", so the ``[run]`` options will be read from the
-  ``[coverage:run]`` section of setup.cfg.  Finishes `issue 304`_.
-
-- The ``report -m`` command can now show missing branches when reporting on
-  branch coverage.  Thanks, Steve Leonard. Closes `issue 230`_.
-
-- The XML report now contains a <source> element, fixing `issue 94`_.
-  Thanks, Stan Hu.
-
-- The class defined in the coverage module is now called ``Coverage`` instead
-  of ``coverage``, though the old name still works for backward compatibility.
-  A short example appears at the end of this section.
-
-- The ``fail-under`` value is now rounded the same as reported results,
-  preventing paradoxical results, fixing `issue 284`_.
-
-- The XML report will now create the output directory if need be, fixing
-  `issue 285`_.  Thanks, Chris Rose.
-
-- HTML reports no longer raise UnicodeDecodeError if a Python file has
-  undecodable characters, fixing `issue 303`_ and `issue 331`_.
-
-- The annotate command will now annotate all files, not just ones relative to
-  the current directory, fixing `issue 57`_.
-
-- The coverage module no longer causes deprecation warnings on Python 3.4 by
-  importing the imp module, fixing `issue 305`_.
-
-- Encoding declarations in source files are only considered if they are truly
-  comments.  Thanks, Anthony Sottile.
-
-.. _issue 57: https://bitbucket.org/ned/coveragepy/issue/57/annotate-command-fails-to-annotate-many
-.. _issue 94: https://bitbucket.org/ned/coveragepy/issue/94/coverage-xml-doesnt-produce-sources
-.. _issue 149: https://bitbucket.org/ned/coveragepy/issue/149/coverage-gevent-looks-broken
-.. _issue 230: https://bitbucket.org/ned/coveragepy/issue/230/show-line-no-for-missing-branches-in
-.. _issue 284: https://bitbucket.org/ned/coveragepy/issue/284/fail-under-should-show-more-precision
-.. _issue 285: https://bitbucket.org/ned/coveragepy/issue/285/xml-report-fails-if-output-file-directory
-.. _issue 303: https://bitbucket.org/ned/coveragepy/issue/303/unicodedecodeerror
-.. _issue 304: https://bitbucket.org/ned/coveragepy/issue/304/attempt-to-get-configuration-from-setupcfg
-.. _issue 305: https://bitbucket.org/ned/coveragepy/issue/305/pendingdeprecationwarning-the-imp-module
-.. _issue 331: https://bitbucket.org/ned/coveragepy/issue/331/failure-of-encoding-detection-on-python2
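-
-A short example of the class rename described above (both spellings are taken
-directly from the entry; nothing else here is new API)::
-
-    import coverage
-
-    cov = coverage.Coverage()    # new, preferred name
-    # cov = coverage.coverage()  # old name, still works for compatibility
-
-    cov.start()
-    # ... run the code to be measured ...
-    cov.stop()
-    cov.save()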
-
-
-Version 3.7.1 --- 2013-12-13
-----------------------------
-
-- Improved the speed of HTML report generation by about 20%.
-
-- Fixed the mechanism for finding OS-installed static files for the HTML report
-  so that it will actually find OS-installed static files.
-
-
-Version 3.7 --- 2013-10-06
---------------------------
-
-- Added the ``--debug`` switch to ``coverage run``.  It accepts a list of
-  options indicating the type of internal activity to log to stderr.
-
-- Improved the branch coverage facility, fixing `issue 92`_ and `issue 175`_.
-
-- Running code with ``coverage run -m`` now behaves more like Python does,
-  setting sys.path properly, which fixes `issue 207`_ and `issue 242`_.
-
-- Coverage.py can now run .pyc files directly, closing `issue 264`_.
-
-- Coverage.py properly supports .pyw files, fixing `issue 261`_.
-
-- Omitting files within a tree specified with the ``source`` option would
-  cause them to be incorrectly marked as unexecuted, as described in
-  `issue 218`_.  This is now fixed.
-
-- When specifying paths to alias together during data combining, you can now
-  specify relative paths, fixing `issue 267`_.
-
-- Most file paths can now be specified with username expansion (``~/src`` or
-  ``~build/src``, for example) and with environment variable expansion
-  (``build/$BUILDNUM/src``).  An illustration appears at the end of this
-  section.
-
-- Trying to create an XML report with no files to report on would cause a
-  ZeroDivisionError, but no longer does, fixing `issue 250`_.
-
-- When running a threaded program under the Python tracer, coverage.py no
-  longer issues a spurious warning about the trace function changing: "Trace
-  function changed, measurement is likely wrong: None."  This fixes `issue
-  164`_.
-
-- Static files necessary for HTML reports are found in system-installed places,
-  to ease OS-level packaging of coverage.py.  Closes `issue 259`_.
-
-- Source files with encoding declarations, but a blank first line, were not
-  decoded properly.  Now they are.  Thanks, Roger Hu.
-
-- The source kit now includes the ``__main__.py`` file in the root coverage
-  directory, fixing `issue 255`_.
-
-.. _issue 92: https://bitbucket.org/ned/coveragepy/issue/92/finally-clauses-arent-treated-properly-in
-.. _issue 164: https://bitbucket.org/ned/coveragepy/issue/164/trace-function-changed-warning-when-using
-.. _issue 175: https://bitbucket.org/ned/coveragepy/issue/175/branch-coverage-gets-confused-in-certain
-.. _issue 207: https://bitbucket.org/ned/coveragepy/issue/207/run-m-cannot-find-module-or-package-in
-.. _issue 218: https://bitbucket.org/ned/coveragepy/issue/218/run-command-does-not-respect-the-omit-flag
-.. _issue 242: https://bitbucket.org/ned/coveragepy/issue/242/running-a-two-level-package-doesnt-work
-.. _issue 250: https://bitbucket.org/ned/coveragepy/issue/250/uncaught-zerodivisionerror-when-generating
-.. _issue 255: https://bitbucket.org/ned/coveragepy/issue/255/directory-level-__main__py-not-included-in
-.. _issue 259: https://bitbucket.org/ned/coveragepy/issue/259/allow-use-of-system-installed-third-party
-.. _issue 261: https://bitbucket.org/ned/coveragepy/issue/261/pyw-files-arent-reported-properly
-.. _issue 264: https://bitbucket.org/ned/coveragepy/issue/264/coverage-wont-run-pyc-files
-.. _issue 267: https://bitbucket.org/ned/coveragepy/issue/267/relative-path-aliases-dont-work
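-
-A rough illustration of the path expansion described above (whether the
-``data_file`` constructor argument is among the expanded paths is an
-assumption; the entry only says "most file paths" are expanded)::
-
-    import coverage
-
-    # "~" and environment variables are now expanded in most configured
-    # paths, e.g. a data file kept per build number under the home directory.
-    cov = coverage.coverage(data_file="~/coverage-data/$BUILDNUM/.coverage")
-    cov.start()
-    # ... run the code to be measured ...
-    cov.stop()
-    cov.save()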
-
-
-Version 3.6 --- 2013-01-05
---------------------------
-
-- Added a page to the docs about troublesome situations, closing `issue 226`_,
-  and added some info to the TODO file, closing `issue 227`_.
-
-.. _issue 226: https://bitbucket.org/ned/coveragepy/issue/226/make-readme-section-to-describe-when
-.. _issue 227: https://bitbucket.org/ned/coveragepy/issue/227/update-todo
-
-
-Version 3.6b3 --- 2012-12-29
-----------------------------
-
-- Beta 2 broke the nose plugin. It's fixed again, closing `issue 224`_.
-
-.. _issue 224: https://bitbucket.org/ned/coveragepy/issue/224/36b2-breaks-nosexcover
-
-
-Version 3.6b2 --- 2012-12-23
-----------------------------
-
-- Coverage.py runs on Python 2.3 and 2.4 again. It was broken in 3.6b1.
-
-- The C extension is optionally compiled using a different, more widely-used
-  technique, taking another stab at fixing `issue 80`_ once and for all.
-
-- Combining data files would create entries for phantom files if used with
-  ``source`` and path aliases.  It no longer does.
-
-- ``debug sys`` now shows the configuration file path that was read.
-
-- If an oddly-behaved package claims that code came from an empty-string
-  file name, coverage.py no longer associates it with the directory name,
-  fixing `issue 221`_.
-
-.. _issue 221: https://bitbucket.org/ned/coveragepy/issue/221/coveragepy-incompatible-with-pyratemp
-
-
-Version 3.6b1 --- 2012-11-28
-----------------------------
-
-- Wildcards in ``include=`` and ``omit=`` arguments were not handled properly
-  in reporting functions, though they were when running.  Now they are handled
-  uniformly, closing `issue 143`_ and `issue 163`_.  **NOTE**: it is possible
-  that your configurations may now be incorrect.  If you use ``include`` or
-  ``omit`` during reporting, whether on the command line, through the API, or
-  in a configuration file, please check carefully that you were not relying on
-  the old broken behavior.
-
-- The **report**, **html**, and **xml** commands now accept a ``--fail-under``
-  switch that indicates in the exit status whether the coverage percentage was
-  less than a particular value.  Closes `issue 139`_.
-
-- The reporting functions coverage.report(), coverage.html_report(), and
-  coverage.xml_report() now all return a float, the total percentage covered
-  measurement.
-
-- The HTML report's title can now be set in the configuration file, with the
-  ``--title`` switch on the command line, or via the API.
-
-- Configuration files now support substitution of environment variables, using
-  syntax like ``${WORD}``.  Closes `issue 97`_.
-
-- Embarrassingly, the ``[xml] output=`` setting in the .coveragerc file simply
-  didn't work.  Now it does.
-
-- The XML report now consistently uses file names for the file name attribute,
-  rather than sometimes using module names.  Fixes `issue 67`_.
-  Thanks, Marcus Cobden.
-
-- Coverage percentage metrics are now computed slightly differently under
-  branch coverage.  This means that completely unexecuted files will now
-  correctly have 0% coverage, fixing `issue 156`_.  This also means that your
-  total coverage numbers will generally now be lower if you are measuring
-  branch coverage.
-
-- When installing, now in addition to creating a "coverage" command, two new
-  aliases are also installed.  A "coverage2" or "coverage3" command will be
-  created, depending on whether you are installing in Python 2.x or 3.x.
-  A "coverage-X.Y" command will also be created corresponding to your specific
-  version of Python.  Closes `issue 111`_.
-
-- The coverage.py installer no longer tries to bootstrap setuptools or
-  Distribute.  You must have one of them installed first, as `issue 202`_
-  recommended.
-
-- The coverage.py kit now includes docs (closing `issue 137`_) and tests.
-
-- On Windows, files are now reported in their correct case, fixing `issue 89`_
-  and `issue 203`_.
-
-- If a file is missing during reporting, the path shown in the error message
-  is now correct, rather than an incorrect path in the current directory.
-  Fixes `issue 60`_.
-
-- Running an HTML report in Python 3 in the same directory as an old Python 2
-  HTML report would fail with a UnicodeDecodeError. This issue (`issue 193`_)
-  is now fixed.
-
-- Fixed yet another error trying to parse non-Python files as Python, this
-  time an IndentationError, closing `issue 82`_ for the fourth time...
-
-- If `coverage xml` fails because there is no data to report, it used to
-  create a zero-length XML file.  Now it doesn't, fixing `issue 210`_.
-
-- Jython files now work with the ``--source`` option, fixing `issue 100`_.
-
-- Running coverage.py under a debugger is unlikely to work, but it shouldn't
-  fail with "TypeError: 'NoneType' object is not iterable".  Fixes `issue
-  201`_.
-
-- On some Linux distributions, when installed with the OS package manager,
-  coverage.py would report its own code as part of the results.  Now it won't,
-  fixing `issue 214`_, though this will take some time to be repackaged by the
-  operating systems.
-