Fri, 18 Sep 2015 19:46:57 +0200
Changed the logic for the code metrics calculations to run as separate background services (raw metrics and maintainability index) and fine-tuned the dialogs.
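Note: the background service protocol delivers results per registered service name, so splitting into 'radon_raw' and 'radon_mi' services reuses one set of plugin slots by pre-binding the metrics type into the callbacks (the lambdas in PluginMetricsRadon.py below). A minimal, self-contained sketch of that binding idea — the function names here are illustrative, not the plugin's API:

    # Sketch only: one shared result handler serving several services by
    # pre-binding a type tag, as the lambdas in PluginMetricsRadon.py do.
    def make_done_callback(dispatch, metrics_type):
        # dispatch(type_, filename, result) is the shared slot
        return lambda filename, result: dispatch(metrics_type, filename, result)

    def dispatch(type_, filename, result):
        print("{0} metrics for {1}: {2}".format(type_, filename, result))

    raw_done = make_done_callback(dispatch, "raw")
    mi_done = make_done_callback(dispatch, "mi")

    raw_done("example.py", {"loc": 10})     # raw metrics for example.py: ...
    mi_done("example.py", {"mi": 75.2})     # mi metrics for example.py: ...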
--- a/PluginMetricsRadon.e4p	Thu Sep 17 19:57:14 2015 +0200
+++ b/PluginMetricsRadon.e4p	Fri Sep 18 19:46:57 2015 +0200
@@ -16,6 +16,7 @@
   <Sources>
     <Source>PluginMetricsRadon.py</Source>
     <Source>RadonMetrics/CodeMetricsCalculator.py</Source>
+    <Source>RadonMetrics/MaintainabilityIndexCalculator.py</Source>
     <Source>RadonMetrics/MaintainabilityIndexDialog.py</Source>
     <Source>RadonMetrics/RawMetricsDialog.py</Source>
     <Source>RadonMetrics/__init__.py</Source>
--- a/PluginMetricsRadon.py	Thu Sep 17 19:57:14 2015 +0200
+++ b/PluginMetricsRadon.py	Fri Sep 18 19:46:57 2015 +0200
@@ -53,14 +53,14 @@
         maintainability index was determined for a file
     @signal complexityDone(str, dict) emitted when the cyclomatic complexity
         was determined for a file
-    @signal error(str, str) emitted in case of an error
-    @signal batchFinished() emitted when a code metrics batch is done
+    @signal error(str, str, str) emitted in case of an error
+    @signal batchFinished(str) emitted when a code metrics batch is done
     """
     metricsDone = pyqtSignal(str, dict)
     maintainabilityIndexDone = pyqtSignal(str, dict)
     complexityDone = pyqtSignal(str, dict)
-    error = pyqtSignal(str, str)
-    batchFinished = pyqtSignal()
+    error = pyqtSignal(str, str, str)
+    batchFinished = pyqtSignal(str)

     def __init__(self, ui):
         """
@@ -77,49 +77,100 @@

         path = os.path.join(os.path.dirname(__file__), packageName)
         try:
+            # raw code metrics calculation
             self.backgroundService.serviceConnect(
-                'radon', 'Python2', path, 'CodeMetricsCalculator',
-                self.metricsCalculationDone,
-                onErrorCallback=self.serviceErrorPy2,
-                onBatchDone=self.batchJobDone)
+                'radon_raw', 'Python2', path, 'CodeMetricsCalculator',
+                lambda fn, res: self.metricsCalculationDone("raw", fn, res),
+                onErrorCallback=lambda fx, lang, fn, msg: self.serviceErrorPy2(
+                    "raw", fx, lang, fn, msg),
+                onBatchDone=lambda fx, lang: self.batchJobDone(
+                    "raw", fx, lang))
             self.backgroundService.serviceConnect(
-                'radon', 'Python3', path, 'CodeMetricsCalculator',
-                self.metricsCalculationDone,
-                onErrorCallback=self.serviceErrorPy3,
-                onBatchDone=self.batchJobDone)
+                'radon_raw', 'Python3', path, 'CodeMetricsCalculator',
+                lambda fn, res: self.metricsCalculationDone("raw", fn, res),
+                onErrorCallback=lambda fx, lang, fn, msg: self.serviceErrorPy3(
+                    "raw", fx, lang, fn, msg),
+                onBatchDone=lambda fx, lang: self.batchJobDone(
+                    "raw", fx, lang))
+
+            # maintainability index calculation
+            self.backgroundService.serviceConnect(
+                'radon_mi', 'Python2', path, 'MaintainabilityIndexCalculator',
+                lambda fn, res: self.metricsCalculationDone("mi", fn, res),
+                onErrorCallback=lambda fx, lang, fn, msg: self.serviceErrorPy2(
+                    "mi", fx, lang, fn, msg),
+                onBatchDone=lambda fx, lang: self.batchJobDone(
+                    "mi", fx, lang))
+            self.backgroundService.serviceConnect(
+                'radon_mi', 'Python3', path, 'MaintainabilityIndexCalculator',
+                lambda fn, res: self.metricsCalculationDone("mi", fn, res),
+                onErrorCallback=lambda fx, lang, fn, msg: self.serviceErrorPy3(
+                    "mi", fx, lang, fn, msg),
+                onBatchDone=lambda fx, lang: self.batchJobDone(
+                    "mi", fx, lang))
+
             self.hasBatch = True
         except TypeError:
+            # backward compatibility for eric 6.0
+            # raw code metrics calculation
             self.backgroundService.serviceConnect(
-                'radon', 'Python2', path, 'CodeMetricsCalculator',
-                self.metricsCalculationDone,
-                onErrorCallback=self.serviceErrorPy2)
+                'radon_raw', 'Python2', path, 'CodeMetricsCalculator',
+                lambda fn, res: self.metricsCalculationDone("raw", fn, res),
+                onErrorCallback=lambda fx, lang, fn, msg: self.serviceErrorPy2(
+                    "raw", fx, lang, fn, msg))
             self.backgroundService.serviceConnect(
-                'radon', 'Python3', path, 'CodeMetricsCalculator',
-                self.metricsCalculationDone,
-                onErrorCallback=self.serviceErrorPy3)
+                'radon_raw', 'Python3', path, 'CodeMetricsCalculator',
+                lambda fn, res: self.metricsCalculationDone("raw", fn, res),
+                onErrorCallback=lambda fx, lang, fn, msg: self.serviceErrorPy3(
+                    "raw", fx, lang, fn, msg))
+
+            # maintainability index calculation
+            self.backgroundService.serviceConnect(
+                'radon_mi', 'Python2', path, 'MaintainabilityIndexCalculator',
+                lambda fn, res: self.metricsCalculationDone("mi", fn, res),
+                onErrorCallback=lambda fx, lang, fn, msg: self.serviceErrorPy2(
+                    "mi", fx, lang, fn, msg))
+            self.backgroundService.serviceConnect(
+                'radon_mi', 'Python3', path, 'MaintainabilityIndexCalculator',
+                lambda fn, res: self.metricsCalculationDone("mi", fn, res),
+                onErrorCallback=lambda fx, lang, fn, msg: self.serviceErrorPy3(
+                    "mi", fx, lang, fn, msg))
+
             self.hasBatch = False

-        self.queuedBatches = []
-        self.batchesFinished = True
+        self.queuedBatches = {
+            "raw": [],
+            "mi": [],
+            "cc": [],
+        }
+        self.batchesFinished = {
+            "raw": True,
+            "mi": True,
+            "cc": True,
+        }

         self.__translator = None
         self.__loadTranslator()

-    def __serviceError(self, fn, msg):
+    def __serviceError(self, type_, fn, msg):
         """
         Private slot handling service errors.

+        @param type_ type of the calculated metrics
+        @type str, one of ["raw", "mi", "cc"]
         @param fn file name
         @type str
         @param msg message text
         @type str
         """
-        self.error.emit(fn, msg)
+        self.error.emit(type_, fn, msg)

-    def serviceErrorPy2(self, fx, lang, fn, msg):
+    def serviceErrorPy2(self, type_, fx, lang, fn, msg):
         """
         Public slot handling service errors for Python 2.

+        @param type_ type of the calculated metrics
+        @type str, one of ["raw", "mi", "cc"]
         @param fx service name
         @type str
         @param lang language
@@ -129,17 +180,20 @@
         @param msg message text
         @type str
         """
-        if fx in ['radon', 'batch_radon'] and lang == 'Python2':
-            if fx == 'radon':
-                self.__serviceError(fn, msg)
+        if fx in ['radon_' + type_, 'batch_radon_' + type_] and \
+                lang == 'Python2':
+            if fx == 'radon_' + type_:
+                self.__serviceError(type_, fn, msg)
             else:
-                self.__serviceError(self.tr("Python 2 batch job"), msg)
-            self.batchJobDone(fx, lang)
+                self.__serviceError(type_, self.tr("Python 2 batch job"), msg)
+            self.batchJobDone(type_, fx, lang)

-    def serviceErrorPy3(self, fx, lang, fn, msg):
+    def serviceErrorPy3(self, type_, fx, lang, fn, msg):
         """
         Public slot handling service errors for Python 3.

+        @param type_ type of the calculated metrics
+        @type str, one of ["raw", "mi", "cc"]
         @param fx service name
         @type str
         @param lang language
@@ -149,52 +203,57 @@
         @param msg message text
         @type str
         """
-        if fx in ['radon', 'batch_radon'] and lang == 'Python3':
-            if fx == 'radon':
-                self.__serviceError(fn, msg)
+        if fx in ['radon_' + type_, 'batch_radon_' + type_] and \
+                lang == 'Python3':
+            if fx == 'radon_' + type_:
+                self.__serviceError(type_, fn, msg)
             else:
-                self.__serviceError(self.tr("Python 3 batch job"), msg)
-            self.batchJobDone(fx, lang)
+                self.__serviceError(type_, self.tr("Python 3 batch job"), msg)
+            self.batchJobDone(type_, fx, lang)

-    def batchJobDone(self, fx, lang):
+    def batchJobDone(self, type_, fx, lang):
         """
         Public slot handling the completion of a batch job.

+        @param type_ type of the calculated metrics
+        @type str, one of ["raw", "mi", "cc"]
         @param fx service name
         @type str
         @param lang language
         @type str
         """
-        if fx in ['radon', 'batch_radon']:
-            if lang in self.queuedBatches:
-                self.queuedBatches.remove(lang)
+        if fx in ['radon_' + type_, 'batch_radon_' + type_]:
+            if lang in self.queuedBatches[type_]:
+                self.queuedBatches[type_].remove(lang)
             # prevent sending the signal multiple times
-            if len(self.queuedBatches) == 0 and not self.batchesFinished:
-                self.batchFinished.emit()
-                self.batchesFinished = True
+            if len(self.queuedBatches[type_]) == 0 and \
+                    not self.batchesFinished[type_]:
+                self.batchFinished.emit(type_)
+                self.batchesFinished[type_] = True

-    def metricsCalculationDone(self, filename, metricsType, result):
+    def metricsCalculationDone(self, type_, filename, result):
         """
         Public slot to dispatch the result.

+        @param type_ type of the calculated metrics
+        @type str, one of ["raw", "mi", "cc"]
         @param filename name of the file the results belong to
         @type str
-        @param metricsType type of the calculated metrics
-        @type str, one of ["raw", "mi", "cc"]
         @param result result dictionary
         @type dict
         """
-        if metricsType == "raw":
+        if type_ == "raw":
             self.metricsDone.emit(filename, result)
-        elif metricsType == "mi":
+        elif type_ == "mi":
             self.maintainabilityIndexDone.emit(filename, result)
-        elif metricsType == "cc":
+        elif type_ == "cc":
             self.complexityDone.emit(filename, result)
         else:
             self.error.emit(
+                type_,
                 filename,
                 self.tr("Unknown metrics result received ({0}).").format(
-                    metricsType)
+                    type_)
             )

     def __initialize(self):
@@ -240,7 +299,7 @@
             return

         self.backgroundService.enqueueRequest(
-            'radon', lang, filename, [source, 'raw'])
+            'radon_raw', lang, filename, [source])

     def rawMetricsBatch(self, argumentsList):
         """
@@ -260,22 +319,22 @@
             if lang not in ['Python2', 'Python3']:
                 continue
             else:
-                data[lang].append((filename, source, 'raw'))
+                data[lang].append((filename, source))

-        self.queuedBatches = []
+        self.queuedBatches["raw"] = []
         for lang in ['Python2', 'Python3']:
             if data[lang]:
-                self.queuedBatches.append(lang)
-                self.backgroundService.enqueueRequest('batch_radon', lang, "",
-                                                      data[lang])
-                self.batchesFinished = False
+                self.queuedBatches["raw"].append(lang)
+                self.backgroundService.enqueueRequest('batch_radon_raw', lang,
+                                                      "", data[lang])
+                self.batchesFinished["raw"] = False

     def cancelRawMetricsBatch(self):
         """
         Public method to cancel all batch jobs.
         """
         for lang in ['Python2', 'Python3']:
-            self.backgroundService.requestCancel('batch_radon', lang)
+            self.backgroundService.requestCancel('batch_radon_raw', lang)

     def maintainabilityIndex(self, lang, filename, source):
         """
@@ -296,7 +355,7 @@
             return

         self.backgroundService.enqueueRequest(
-            'radon', lang, filename, [source, 'mi'])
+            'radon_mi', lang, filename, [source])

     def maintainabilityIndexBatch(self, argumentsList):
         """
@@ -316,22 +375,22 @@
             if lang not in ['Python2', 'Python3']:
                 continue
             else:
-                data[lang].append((filename, source, 'mi'))
+                data[lang].append((filename, source))

-        self.queuedBatches = []
+        self.queuedBatches["mi"] = []
         for lang in ['Python2', 'Python3']:
             if data[lang]:
-                self.queuedBatches.append(lang)
-                self.backgroundService.enqueueRequest('batch_radon', lang, "",
-                                                      data[lang])
-                self.batchesFinished = False
+                self.queuedBatches["mi"].append(lang)
+                self.backgroundService.enqueueRequest('batch_radon_mi', lang,
+                                                      "", data[lang])
+                self.batchesFinished["mi"] = False

     def cancelMaintainabilityIndexBatch(self):
         """
         Public method to cancel all batch jobs.
         """
         for lang in ['Python2', 'Python3']:
-            self.backgroundService.requestCancel('batch_radon', lang)
+            self.backgroundService.requestCancel('batch_radon_mi', lang)

     def activate(self):
         """
--- a/RadonMetrics/CodeMetricsCalculator.py	Thu Sep 17 19:57:14 2015 +0200
+++ b/RadonMetrics/CodeMetricsCalculator.py	Fri Sep 18 19:46:57 2015 +0200
@@ -11,7 +11,6 @@
     pass

 import multiprocessing
-import sys


 def initService():
@@ -20,7 +19,7 @@

     @return the entry point for the background client (function)
     """
-    return codeMetrics
+    return rawCodeMetrics


 def initBatchService():
@@ -29,32 +28,26 @@

     @return the entry point for the background client (function)
     """
-    return batchCodeMetrics
+    return batchRawCodeMetrics


-def codeMetrics(file, text="", type_=""):
+def rawCodeMetrics(file, text=""):
     """
-    Private function to calculate selected code metrics of one file.
+    Private function to calculate the raw code metrics of one file.

     @param file source filename
     @type str
     @param text source text
     @param str
-    @return tuple containing the filename and the result list
-    @rtype (str, list)
+    @return tuple containing the result dictionary
+    @rtype (tuple of dict)
     """
-    if type_ == "raw":
-        return __rawCodeMetrics(file, text)
-    elif type_ == "mi":
-        return __maintainabilityIndex(file, text)
-
-    res = {"error": "Unknown metrics '{0}'.".format(type_)}
-    return (res, )
+    return __rawCodeMetrics(file, text)


-def batchCodeMetrics(argumentsList, send, fx, cancelled):
+def batchRawCodeMetrics(argumentsList, send, fx, cancelled):
     """
-    Module function to calculate selected code metrics for a batch of files.
+    Module function to calculate the raw code metrics for a batch of files.

     @param argumentsList list of arguments tuples as given for check
     @type list
@@ -111,13 +104,8 @@
     @param output output queue
     @type multiprocessing.Queue
     """
-    for filename, source, type_ in iter(input.get, 'STOP'):
-        if type_ == "raw":
-            result = __rawCodeMetrics(filename, source)
-        elif type_ == "mi":
-            result = __maintainabilityIndex(filename, source)
-        else:
-            result = {}
+    for filename, source in iter(input.get, 'STOP'):
+        result = __rawCodeMetrics(filename, source)
         output.put((filename, result))


@@ -137,7 +125,7 @@
         res = __raw2Dict(analyze(text))
     except Exception as err:
         res = {"error": str(err)}
-    return ("raw", res)
+    return (res, )


 def __raw2Dict(obj):
@@ -156,33 +144,3 @@
         if v is not None:
             result[a] = v
     return result
-
-
-def __maintainabilityIndex(file, text=""):
-    """
-    Private function to calculate the maintainability index for one Python
-    file.
-
-    @param file source filename
-    @type str
-    @param text source text
-    @type str
-    @return tuple containing the result dictionary
-    @rtype (tuple of dict)
-    """
-    from radon.metrics import mi_visit, mi_rank
-
-    # Check type for py2: if not str it's unicode
-    if sys.version_info[0] == 2:
-        try:
-            text = text.encode('utf-8')
-        except UnicodeError:
-            pass
-
-    try:
-        mi = mi_visit(text, True)
-        rank = mi_rank(mi)
-        res = {"mi": mi, "rank": rank}
-    except Exception as err:
-        res = {"error": str(err)}
-    return ("mi", res)
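For context, the raw metrics behind CodeMetricsCalculator come from radon's analyze() function; used directly, outside the background service, it looks roughly like this (the sample source string and the dict conversion are illustrative):

    # Sketch of the underlying radon call, not plugin code (radon installed).
    from radon.raw import analyze

    source = (
        "def hello(name):\n"
        "    # greet someone\n"
        "    return 'Hello, {0}!'.format(name)\n"
    )

    raw = analyze(source)       # namedtuple: loc, lloc, sloc, comments, ...
    metrics = {name: value
               for name, value in zip(raw._fields, raw)
               if value is not None}
    print(metrics)              # e.g. {'loc': 3, 'lloc': 2, 'sloc': 3, ...}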
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/RadonMetrics/MaintainabilityIndexCalculator.py	Fri Sep 18 19:46:57 2015 +0200
@@ -0,0 +1,141 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2015 Detlev Offenbach <detlev@die-offenbachs.de>
+#
+
+from __future__ import unicode_literals
+
+try:
+    str = unicode   # __IGNORE_EXCEPTION __IGNORE_WARNING__
+except NameError:
+    pass
+
+import multiprocessing
+import sys
+
+
+def initService():
+    """
+    Initialize the service and return the entry point.
+
+    @return the entry point for the background client (function)
+    """
+    return maintainabilityIndex
+
+
+def initBatchService():
+    """
+    Initialize the batch service and return the entry point.
+
+    @return the entry point for the background client (function)
+    """
+    return batchMaintainabilityIndex
+
+
+def maintainabilityIndex(file, text=""):
+    """
+    Private function to calculate the maintainability index of one file.
+
+    @param file source filename
+    @type str
+    @param text source text
+    @param str
+    @return tuple containing the result dictionary
+    @rtype (tuple of dict)
+    """
+    return __maintainabilityIndex(file, text)
+
+
+def batchMaintainabilityIndex(argumentsList, send, fx, cancelled):
+    """
+    Module function to calculate the maintainability index for a batch of
+    files.
+
+    @param argumentsList list of arguments tuples as given for check
+    @type list
+    @param send reference to send function
+    @type function
+    @param fx registered service name
+    @type str
+    @param cancelled reference to function checking for a cancellation
+    @type function
+    """
+    try:
+        NumberOfProcesses = multiprocessing.cpu_count()
+        if NumberOfProcesses >= 1:
+            NumberOfProcesses -= 1
+    except NotImplementedError:
+        NumberOfProcesses = 1
+
+    # Create queues
+    taskQueue = multiprocessing.Queue()
+    doneQueue = multiprocessing.Queue()
+
+    # Submit tasks (initially two time number of processes
+    initialTasks = 2 * NumberOfProcesses
+    for task in argumentsList[:initialTasks]:
+        taskQueue.put(task)
+
+    # Start worker processes
+    for i in range(NumberOfProcesses):
+        multiprocessing.Process(target=worker, args=(taskQueue, doneQueue))\
+            .start()
+
+    # Get and send results
+    endIndex = len(argumentsList) - initialTasks
+    for i in range(len(argumentsList)):
+        filename, result = doneQueue.get()
+        send(fx, filename, result)
+        if cancelled():
+            # just exit the loop ignoring the results of queued tasks
+            break
+        if i < endIndex:
+            taskQueue.put(argumentsList[i + initialTasks])
+
+    # Tell child processes to stop
+    for i in range(NumberOfProcesses):
+        taskQueue.put('STOP')
+
+
+def worker(input, output):
+    """
+    Module function acting as the parallel worker for the style check.
+
+    @param input input queue
+    @type multiprocessing.Queue
+    @param output output queue
+    @type multiprocessing.Queue
+    """
+    for filename, source in iter(input.get, 'STOP'):
+        result = __maintainabilityIndex(filename, source)
+        output.put((filename, result))
+
+
+def __maintainabilityIndex(file, text=""):
+    """
+    Private function to calculate the maintainability index for one Python
+    file.
+
+    @param file source filename
+    @type str
+    @param text source text
+    @type str
+    @return tuple containing the result dictionary
+    @rtype (tuple of dict)
+    """
+    from radon.metrics import mi_visit, mi_rank
+
+    # Check type for py2: if not str it's unicode
+    if sys.version_info[0] == 2:
+        try:
+            text = text.encode('utf-8')
+        except UnicodeError:
+            pass
+
+    try:
+        mi = mi_visit(text, True)
+        rank = mi_rank(mi)
+        res = {"mi": mi, "rank": rank}
+    except Exception as err:
+        res = {"error": str(err)}
+    return (res, )
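The new calculator is essentially a thin multiprocessing wrapper around radon's maintainability index API. Used directly, that API looks like this (a sketch assuming radon is installed; the sample source is illustrative):

    # Sketch of the radon maintainability index API used above.
    from radon.metrics import mi_visit, mi_rank

    source = (
        "def add(a, b):\n"
        "    '''Return the sum of a and b.'''\n"
        "    return a + b\n"
    )

    mi = mi_visit(source, True)     # True: count multi-line strings as comments
    rank = mi_rank(mi)              # 'A', 'B' or 'C'
    print({"mi": mi, "rank": rank})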
--- a/RadonMetrics/MaintainabilityIndexDialog.py	Thu Sep 17 19:57:14 2015 +0200
+++ b/RadonMetrics/MaintainabilityIndexDialog.py	Fri Sep 18 19:46:57 2015 +0200
@@ -18,6 +18,7 @@
 import fnmatch

 from PyQt5.QtCore import pyqtSlot, qVersion, Qt, QTimer, QLocale
+from PyQt5.QtGui import QColor
 from PyQt5.QtWidgets import (
     QDialog, QDialogButtonBox, QAbstractButton, QHeaderView, QTreeWidgetItem,
     QApplication
@@ -63,6 +64,7 @@
         self.__project = e5App().getObject("Project")
         self.__locale = QLocale()
         self.__finished = True
+        self.__errorItem = None

         self.__fileList = []
         self.filterFrame.setVisible(False)
@@ -75,6 +77,11 @@
             "<tr><td><b>C</b></td><td>score ≤ 9</td></tr>"
             "</table>"
         ))
+        self.__rankColors = {
+            "A": Qt.green,
+            "B": Qt.yellow,  # QColor("orange"),
+            "C": Qt.red,
+        }

     def __resizeResultColumns(self):
         """
@@ -92,16 +99,18 @@
         @param values values to be displayed
         @type dict
         """
-        # TODO: colorize the rank column according to rank (green, orange, red)
         data = [self.__project.getRelativePath(filename)]
         try:
-            data.append(self.__locale.toString(float(values["mi"]), "f", 2))
+            data.append("{0:>6}".format(
+                self.__locale.toString(float(values["mi"]), "f", 2)))
         except ValueError:
             data.append(values["mi"])
         data.append(values["rank"])
         itm = QTreeWidgetItem(self.resultList, data)
         itm.setTextAlignment(1, Qt.Alignment(Qt.AlignRight))
         itm.setTextAlignment(2, Qt.Alignment(Qt.AlignHCenter))
+        if values["rank"] in ["A", "B", "C"]:
+            itm.setBackground(2, self.__rankColors[values["rank"]])

         if values["rank"] in ["A", "B", "C"]:
             self.__summary[values["rank"]] += 1
@@ -115,13 +124,18 @@
         @param message error message
         @type str
         """
-        itm = QTreeWidgetItem(self.resultList, [
-            "{0} ({1})".format(self.__project.getRelativePath(filename),
-                               message)])
-        itm.setFirstColumnSpanned(True)
-        font = itm.font(0)
-        font.setItalic(True)
-        itm.setFont(0, font)
+        if self.__errorItem is None:
+            self.__errorItem = QTreeWidgetItem(self.resultList, [
+                self.tr("Errors")])
+            self.__errorItem.setExpanded(True)
+            self.__errorItem.setForeground(0, Qt.red)
+
+        msg = "{0} ({1})".format(self.__project.getRelativePath(filename),
+                                 message)
+        if not self.resultList.findItems(msg, Qt.MatchExactly):
+            itm = QTreeWidgetItem(self.__errorItem, [msg])
+            itm.setForeground(0, Qt.red)
+            itm.setFirstColumnSpanned(True)

     def prepare(self, fileList, project):
         """
@@ -275,25 +289,32 @@
         self.__finished = False
         self.radonService.maintainabilityIndexBatch(argumentsList)

-    def __batchFinished(self):
+    def __batchFinished(self, type_):
         """
         Private slot handling the completion of a batch job.
+
+        @param type_ type of the calculated metrics
+        @type str, one of ["raw", "mi", "cc"]
         """
-        self.checkProgressLabel.setPath("")
-        self.checkProgress.setMaximum(1)
-        self.checkProgress.setValue(1)
-        self.__finish()
+        if type_ == "mi":
+            self.checkProgressLabel.setPath("")
+            self.checkProgress.setMaximum(1)
+            self.checkProgress.setValue(1)
+            self.__finish()

-    def __processError(self, fn, msg):
+    def __processError(self, type_, fn, msg):
         """
         Private slot to process an error indication from the service.

+        @param type_ type of the calculated metrics
+        @type str, one of ["raw", "mi", "cc"]
         @param fn filename of the file
         @type str
         @param msg error message
         @type str
         """
-        self.__createErrorItem(fn, msg)
+        if type_ == "mi":
+            self.__createErrorItem(fn, msg)

     def __processResult(self, fn, result):
         """
@@ -321,7 +342,7 @@

         self.progress += 1
         self.checkProgress.setValue(self.progress)
-        self.checkProgressLabel.setPath(fn)
+        self.checkProgressLabel.setPath(self.__project.getRelativePath(fn))
         QApplication.processEvents()

         if not self.__batch:
@@ -404,6 +425,7 @@
             fileList = \
                 [f for f in fileList if not fnmatch.fnmatch(f, filter)]

+        self.__errorItem = None
         self.resultList.clear()
         self.cancelled = False
         self.start(fileList)
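Both dialog changes follow the same two QTreeWidget patterns: a coloured background on the rank column and a single shared "Errors" parent item collecting de-duplicated error rows. A compressed, self-contained sketch of those patterns (the widget setup and sample rows are illustrative, not the dialog code):

    # Standalone sketch of the tree widget patterns used in the dialogs.
    import sys
    from PyQt5.QtCore import Qt
    from PyQt5.QtGui import QBrush
    from PyQt5.QtWidgets import QApplication, QTreeWidget, QTreeWidgetItem

    app = QApplication(sys.argv)
    resultList = QTreeWidget()
    resultList.setColumnCount(3)

    rankColors = {"A": Qt.green, "B": Qt.yellow, "C": Qt.red}

    # result row with a coloured rank column
    itm = QTreeWidgetItem(resultList, ["example.py", " 75.20", "A"])
    itm.setBackground(2, QBrush(rankColors["A"]))

    # grouped, de-duplicated error entries under one parent item
    errorItem = QTreeWidgetItem(resultList, ["Errors"])
    errorItem.setForeground(0, QBrush(Qt.red))
    msg = "broken.py (invalid syntax)"
    if not resultList.findItems(msg, Qt.MatchExactly | Qt.MatchRecursive):
        errItm = QTreeWidgetItem(errorItem, [msg])
        errItm.setForeground(0, QBrush(Qt.red))

    resultList.show()
    # app.exec_()   # uncomment to run the event loop and keep the window open

In this sketch Qt.MatchRecursive is added so findItems() also sees the child rows under the "Errors" item; the plain QBrush wrapping makes the colour arguments explicit.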
--- a/RadonMetrics/RawMetricsDialog.py	Thu Sep 17 19:57:14 2015 +0200
+++ b/RadonMetrics/RawMetricsDialog.py	Fri Sep 18 19:46:57 2015 +0200
@@ -68,6 +68,7 @@
         self.__project = e5App().getObject("Project")
         self.__locale = QLocale()
         self.__finished = True
+        self.__errorItem = None

         self.__fileList = []
         self.filterFrame.setVisible(False)
@@ -133,13 +134,18 @@
         @param message error message
         @type str
         """
-        itm = QTreeWidgetItem(self.resultList, [
-            "{0} ({1})".format(self.__project.getRelativePath(filename),
-                               message)])
-        itm.setFirstColumnSpanned(True)
-        font = itm.font(0)
-        font.setItalic(True)
-        itm.setFont(0, font)
+        if self.__errorItem is None:
+            self.__errorItem = QTreeWidgetItem(self.resultList, [
+                self.tr("Errors")])
+            self.__errorItem.setExpanded(True)
+            self.__errorItem.setForeground(0, Qt.red)
+
+        msg = "{0} ({1})".format(self.__project.getRelativePath(filename),
+                                 message)
+        if not self.resultList.findItems(msg, Qt.MatchExactly):
+            itm = QTreeWidgetItem(self.__errorItem, [msg])
+            itm.setForeground(0, Qt.red)
+            itm.setFirstColumnSpanned(True)

     def prepare(self, fileList, project):
         """
@@ -290,25 +296,32 @@
         self.__finished = False
         self.radonService.rawMetricsBatch(argumentsList)

-    def __batchFinished(self):
+    def __batchFinished(self, type_):
         """
         Private slot handling the completion of a batch job.
+
+        @param type_ type of the calculated metrics
+        @type str, one of ["raw", "mi", "cc"]
         """
-        self.checkProgressLabel.setPath("")
-        self.checkProgress.setMaximum(1)
-        self.checkProgress.setValue(1)
-        self.__finish()
+        if type_ == "raw":
+            self.checkProgressLabel.setPath("")
+            self.checkProgress.setMaximum(1)
+            self.checkProgress.setValue(1)
+            self.__finish()

-    def __processError(self, fn, msg):
+    def __processError(self, type_, fn, msg):
         """
         Private slot to process an error indication from the service.

+        @param type_ type of the calculated metrics
+        @type str, one of ["raw", "mi", "cc"]
         @param fn filename of the file
         @type str
         @param msg error message
         @type str
         """
-        self.__createErrorItem(fn, msg)
+        if type_ == "raw":
+            self.__createErrorItem(fn, msg)

     def __processResult(self, fn, result):
         """
@@ -462,6 +475,7 @@
             fileList = \
                 [f for f in fileList if not fnmatch.fnmatch(f, filter)]

+        self.__errorItem = None
         self.resultList.clear()
         self.cancelled = False
         self.start(fileList)
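Because error() gained a leading type argument and batchFinished() now carries the metrics type, every consumer has to filter on that type, which is exactly what both dialogs now do. A sketch of wiring slots to the reshaped signals — radonService stands in for the plugin instance and is assumed, not shown here:

    # Sketch: consuming the widened error(str, str, str) and batchFinished(str).
    def onError(type_, filename, message):
        if type_ == "raw":          # each dialog only reacts to its own type
            print("error in {0}: {1}".format(filename, message))

    def onBatchFinished(type_):
        if type_ == "raw":
            print("raw metrics batch finished")

    # radonService.error.connect(onError)
    # radonService.batchFinished.connect(onBatchFinished)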