--- a/src/eric7/DataViews/CodeMetrics.py Wed Jul 13 11:16:20 2022 +0200
+++ b/src/eric7/DataViews/CodeMetrics.py Wed Jul 13 14:55:47 2022 +0200
@@ -20,7 +20,7 @@
 import keyword
 import token
 import tokenize
-
+
 import Utilities

 KEYWORD = token.NT_OFFSET + 1
@@ -35,10 +35,11 @@
     """
     Class to store the token related infos.
     """
+
     def __init__(self, **kw):
         """
         Constructor
-
+
         @keyparam **kw list of key, value pairs
         """
         self.__dict__.update(kw)
@@ -48,22 +49,23 @@
     """
     Class used to parse the source code of a Python file.
     """
+
     def parse(self, text):
         """
         Public method used to parse the source code.
-
+
         @param text the source code as read from a Python source file
         """
         self.tokenlist = []
-
+
         # convert eols
         text = Utilities.convertLineEnds(text, os.linesep)
-
+
         if not text.endswith(os.linesep):
             text = "{0}{1}".format(text, os.linesep)
-
+
         self.lines = text.count(os.linesep)
-
+
         source = io.BytesIO(text.encode("utf-8"))
         try:
             gen = tokenize.tokenize(source.readline)
@@ -73,7 +75,7 @@
                 if toktype in [token.NEWLINE, tokenize.NL]:
                     self.__addToken(toktype, os.linesep, srow, scol, line)
                 elif toktype in [token.INDENT, token.DEDENT]:
-                    self.__addToken(toktype, '', srow, scol, line)
+                    self.__addToken(toktype, "", srow, scol, line)
                 elif toktype == token.NAME and keyword.iskeyword(toktext):
                     toktype = KEYWORD
                     self.__addToken(toktype, toktext, srow, scol, line)
@@ -83,36 +85,39 @@
             print("Token Error: {0}".format(str(msg)))
             # __IGNORE_WARNING_M801__
             return
-
+
         return
-
+
     def __addToken(self, toktype, toktext, srow, scol, line):
         """
         Private method used to add a token to our list of tokens.
-
+
         @param toktype the type of the token (int)
         @param toktext the text of the token (string)
         @param srow starting row of the token (int)
         @param scol starting column of the token (int)
         @param line logical line the token was found (string)
         """
-        self.tokenlist.append(Token(type=toktype, text=toktext, row=srow,
-                                    col=scol, line=line))
+        self.tokenlist.append(
+            Token(type=toktype, text=toktext, row=srow, col=scol, line=line)
+        )

-spacer = ' '
+
+spacer = " "


 class SourceStat:
     """
     Class used to calculate and store the source code statistics.
     """
+
     def __init__(self):
         """
         Constructor
         """
         self.identifiers = []
         # list of identifiers in order of appearance
-        self.active = [('TOTAL ', -1, 0)]
+        self.active = [("TOTAL ", -1, 0)]
         # stack of active identifiers and indent levels
         self.counters = {}
         # counters per identifier
@@ -121,7 +126,7 @@
     def indent(self, tok):
         """
         Public method used to increment the indentation level.
-
+
         @param tok a token (Token, ignored)
         """
         self.indent_level += 1
@@ -129,7 +134,7 @@
     def dedent(self, tok):
         """
         Public method used to decrement the indentation level.
-
+
         @param tok the token to be processed (Token)
         @exception ValueError raised to indicate an invalid indentation level
         """
@@ -140,21 +145,21 @@
         # remove identifiers of a higher indentation
         while self.active and self.active[-1][1] >= self.indent_level:
             counters = self.counters.setdefault(self.active[-1][0], {})
-            counters['start'] = self.active[-1][2]
-            counters['end'] = tok.row - 1
-            counters['lines'] = tok.row - self.active[-1][2]
+            counters["start"] = self.active[-1][2]
+            counters["end"] = tok.row - 1
+            counters["lines"] = tok.row - self.active[-1][2]
             del self.active[-1]

     def push(self, identifier, row):
         """
         Public method used to store an identifier.
-
+
         @param identifier the identifier to be remembered (string)
         @param row the row, the identifier is defined in (int)
         """
         if len(self.active) > 1 and self.indent_level > self.active[-1][1]:
             # __IGNORE_WARNING_Y108__
-            qualified = self.active[-1][0] + '.' + identifier
+            qualified = self.active[-1][0] + "." + identifier
         else:
             qualified = identifier
         self.active.append((qualified, self.indent_level, row))
@@ -163,7 +168,7 @@
     def inc(self, key, value=1):
         """
         Public method used to increment the value of a key.
-
+
         @param key the key to be incremented
         @param value the increment (int)
         """
@@ -174,7 +179,7 @@
     def getCounter(self, counterId, key):
         """
         Public method used to get a specific counter value.
-
+
         @param counterId id of the counter (string)
         @param key key of the value to be retrieved (string)
         @return the value of the requested counter (int)
@@ -185,7 +190,7 @@
 def summarize(total, key, value):
     """
     Module function used to collect overall statistics.
-
+
     @param total the dictionary for the overall statistics
     @param key the key to be summarize
     @param value the value to be added to the overall statistics
@@ -198,7 +203,7 @@
 def analyze(filename, total):
     """
     Module function used analyze the source of a Python file.
-
+
     @param filename name of the Python file to be analyzed (string)
     @param total dictionary receiving the overall code statistics
     @return a statistics object with the collected code statistics (SourceStat)
@@ -212,42 +217,37 @@
     parser.parse(text)

     stats = SourceStat()
-    stats.inc('lines', parser.lines)
+    stats.inc("lines", parser.lines)
     for idx in range(len(parser.tokenlist)):
         tok = parser.tokenlist[idx]
-
+
         # counting
         if tok.type == NEWLINE:
-            stats.inc('nloc')
+            stats.inc("nloc")
         elif tok.type == COMMENT:
-            stats.inc('comments')
+            stats.inc("comments")
             if tok.line.strip() == tok.text:
-                stats.inc('commentlines')
+                stats.inc("commentlines")
         elif tok.type == EMPTY:
             if parser.tokenlist[idx - 1].type == token.OP:
-                stats.inc('nloc')
+                stats.inc("nloc")
             elif parser.tokenlist[idx - 1].type == COMMENT:
                 continue
             else:
-                stats.inc('empty')
+                stats.inc("empty")
         elif tok.type == INDENT:
             stats.indent(tok)
         elif tok.type == DEDENT:
             stats.dedent(tok)
-        elif (
-            tok.type == KEYWORD and
-            tok.text in ("class", "def")
-        ):
+        elif tok.type == KEYWORD and tok.text in ("class", "def"):
             stats.push(parser.tokenlist[idx + 1].text, tok.row)

     # collect overall statistics
-    summarize(total, 'lines', parser.lines)
-    summarize(total, 'bytes', len(text))
-    summarize(total, 'comments', stats.getCounter('TOTAL ', 'comments'))
-    summarize(total, 'commentlines',
-              stats.getCounter('TOTAL ', 'commentlines'))
-    summarize(total, 'empty lines', stats.getCounter('TOTAL ', 'empty'))
-    summarize(total, 'non-commentary lines',
-              stats.getCounter('TOTAL ', 'nloc'))
+    summarize(total, "lines", parser.lines)
+    summarize(total, "bytes", len(text))
+    summarize(total, "comments", stats.getCounter("TOTAL ", "comments"))
+    summarize(total, "commentlines", stats.getCounter("TOTAL ", "commentlines"))
+    summarize(total, "empty lines", stats.getCounter("TOTAL ", "empty"))
+    summarize(total, "non-commentary lines", stats.getCounter("TOTAL ", "nloc"))

     return stats