Plugins/CheckerPlugins/Pep8/pep8.py

changeset 2799:ec8c717e80f5
parent    2302:f29e9405c851
child     2847:1843ef6e2656
child     2861:cdcbca0cea82
child     2931:552d5934c6f6
--- a/Plugins/CheckerPlugins/Pep8/pep8.py	Mon Jul 15 18:42:44 2013 +0200
+++ b/Plugins/CheckerPlugins/Pep8/pep8.py	Wed Jul 17 20:13:50 2013 +0200
@@ -44,6 +44,7 @@
 500 line length
 600 deprecation
 700 statements
+900 processing errors
 
 You can add checks to this program by writing plugins. Each plugin is
 a simple function that is called for each line of source code, either
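
(Aside on the plugin convention the docstring above describes: pep8
dispatches on the name of a check function's first argument,
'physical_line' or 'logical_line', and expects either None or an
(offset, text) result whose text begins with the error code.  A minimal
sketch, modeled on the W191 check that ships with pep8 itself; the
regex-free indentation scan here is a simplification of the original.)

    def tabs_obsolete(physical_line):
        # Called once per raw source line because the first argument
        # is named 'physical_line'.  Returning None means "no finding".
        stripped = physical_line.lstrip()
        indent = physical_line[:len(physical_line) - len(stripped)]
        if '\t' in indent:
            return indent.index('\t'), "W191 indentation contains tabs"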
@@ -233,6 +234,8 @@
         "multiple statements on one line (colon)"),
     "E702": QT_TRANSLATE_NOOP("pep8",
         "multiple statements on one line (semicolon)"),
+    "E901": QT_TRANSLATE_NOOP("pep8",
+        "Token Error: {0}"),
 }
 
 pep8_messages_sample_args = {
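
(The new E901 entry is the only message added here that carries a
format placeholder; the pep8_messages_sample_args mapping, truncated by
the context window above, presumably supplies example arguments for
such parameterized messages.  A minimal sketch of how the placeholder
gets filled; the actual lookup and Qt translation happen at the eric
call site, outside this diff.)

    # Simulated lookup; in eric the template would pass through
    # QCoreApplication.translate() before formatting, since
    # QT_TRANSLATE_NOOP only marks the string for translation.
    pep8_messages = {"E901": "Token Error: {0}"}
    print(pep8_messages["E901"].format("EOF in multi-line statement"))
    # -> Token Error: EOF in multi-line statement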
@@ -1069,42 +1072,46 @@
         self.blank_lines_before_comment = 0
         self.tokens = []
         parens = 0
-        for token in tokenize.generate_tokens(self.readline_check_physical):
-            if options.verbose >= 3:
-                if token[2][0] == token[3][0]:
-                    pos = '[%s:%s]' % (token[2][1] or '', token[3][1])
-                else:
-                    pos = 'l.%s' % token[3][0]
-                print('l.%s\t%s\t%s\t%r' %
-                    (token[2][0], pos, tokenize.tok_name[token[0]], token[1]))
-            self.tokens.append(token)
-            token_type, text = token[0:2]
-            if token_type == tokenize.OP and text in '([{':
-                parens += 1
-            if token_type == tokenize.OP and text in '}])':
-                parens -= 1
-            if token_type == tokenize.NEWLINE and not parens:
-                self.check_logical()
-                self.blank_lines = 0
-                self.blank_lines_before_comment = 0
-                self.tokens = []
-            if token_type == tokenize.NL and not parens:
-                if len(self.tokens) <= 1:
-                    # The physical line contains only this token.
-                    self.blank_lines += 1
-                self.tokens = []
-            if token_type == tokenize.COMMENT:
-                source_line = token[4]
-                token_start = token[2][1]
-                if source_line[:token_start].strip() == '':
-                    self.blank_lines_before_comment = max(self.blank_lines,
-                        self.blank_lines_before_comment)
+        try:
+            for token in tokenize.generate_tokens(self.readline_check_physical):
+                if options.verbose >= 3:
+                    if token[2][0] == token[3][0]:
+                        pos = '[%s:%s]' % (token[2][1] or '', token[3][1])
+                    else:
+                        pos = 'l.%s' % token[3][0]
+                    print('l.%s\t%s\t%s\t%r' %
+                        (token[2][0], pos, tokenize.tok_name[token[0]], token[1]))
+                self.tokens.append(token)
+                token_type, text = token[0:2]
+                if token_type == tokenize.OP and text in '([{':
+                    parens += 1
+                if token_type == tokenize.OP and text in '}])':
+                    parens -= 1
+                if token_type == tokenize.NEWLINE and not parens:
+                    self.check_logical()
                     self.blank_lines = 0
-                if text.endswith('\n') and not parens:
-                    # The comment also ends a physical line.  This works around
-                    # Python < 2.6 behaviour, which does not generate NL after
-                    # a comment which is on a line by itself.
+                    self.blank_lines_before_comment = 0
+                    self.tokens = []
+                if token_type == tokenize.NL and not parens:
+                    if len(self.tokens) <= 1:
+                        # The physical line contains only this token.
+                        self.blank_lines += 1
                     self.tokens = []
+                if token_type == tokenize.COMMENT:
+                    source_line = token[4]
+                    token_start = token[2][1]
+                    if source_line[:token_start].strip() == '':
+                        self.blank_lines_before_comment = max(self.blank_lines,
+                            self.blank_lines_before_comment)
+                        self.blank_lines = 0
+                    if text.endswith('\n') and not parens:
+                        # The comment also ends a physical line.  This works around
+                        # Python < 2.6 behaviour, which does not generate NL after
+                        # a comment which is on a line by itself.
+                        self.tokens = []
+        except tokenize.TokenError as err:
+            msg, (lnum, pos) = err.args
+            self.report_error_args(lnum, pos, "E901", "TokenError", msg)
         return self.file_errors
 
     def report_error(self, line_number, offset, text, check):
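
(Why the new try/except matters: tokenize.generate_tokens() raises
tokenize.TokenError when it reaches EOF inside an unterminated
construct, such as an open bracket or an unclosed multi-line string,
and without the handler that exception would abort the whole check run
instead of producing a report.  A self-contained sketch reproducing the
err.args unpacking used in the new except clause; the sample source and
the print format are illustrative.)

    import io
    import tokenize

    source = "x = (1,\n"          # unclosed parenthesis triggers the error
    try:
        for token in tokenize.generate_tokens(io.StringIO(source).readline):
            pass
    except tokenize.TokenError as err:
        # err.args is (message, (line_number, column)), exactly the
        # shape unpacked by the except clause added above.
        msg, (lnum, pos) = err.args
        print("E901 Token Error: {0} (line {1}, column {2})".format(
            msg, lnum, pos))

Run against this sample input, the sketch prints
"E901 Token Error: EOF in multi-line statement (line 2, column 0)".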
