Plugins/CheckerPlugins/Pep8/pep8.py

branch:       5_3_x
changeset:    2801:8ccc38b80dc2
parent:       2302:f29e9405c851
child:        2847:1843ef6e2656
child:        2861:cdcbca0cea82
child:        2931:552d5934c6f6

comparison:   2797:39951e911d6b with 2801:8ccc38b80dc2
@@ -42,10 +42,11 @@
 300 blank lines
 400 imports
 500 line length
 600 deprecation
 700 statements
+900 processing errors
 
 You can add checks to this program by writing plugins. Each plugin is
 a simple function that is called for each line of source code, either
 physical or logical.
 
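The docstring hunk above documents the plugin mechanism. As a minimal sketch, assuming pep8's usual convention of dispatching on the first argument's name ('physical_line' or 'logical_line') and treating a returned (offset, text) pair as a finding; the function name and W-code here are invented for illustration:

def trailing_semicolon(physical_line):
    # Hypothetical check, not part of pep8.py: flag a semicolon that
    # ends the physical line.
    stripped = physical_line.rstrip()
    if stripped.endswith(';'):
        # Column of the offending character plus a pep8-style message.
        return len(stripped) - 1, "W791 trailing semicolon"
    return None  # line is clean
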
231 "backticks are deprecated, use 'repr()'"), 232 "backticks are deprecated, use 'repr()'"),
232 "E701": QT_TRANSLATE_NOOP("pep8", 233 "E701": QT_TRANSLATE_NOOP("pep8",
233 "multiple statements on one line (colon)"), 234 "multiple statements on one line (colon)"),
234 "E702": QT_TRANSLATE_NOOP("pep8", 235 "E702": QT_TRANSLATE_NOOP("pep8",
235 "multiple statements on one line (semicolon)"), 236 "multiple statements on one line (semicolon)"),
237 "E901": QT_TRANSLATE_NOOP("pep8",
238 "Token Error: {0}"),
236 } 239 }
237 240
238 pep8_messages_sample_args = { 241 pep8_messages_sample_args = {
239 "E201": ["([{"], 242 "E201": ["([{"],
240 "E202": ["}])"], 243 "E202": ["}])"],
@@ -1067,46 +1070,50 @@
         self.previous_logical = ''
         self.blank_lines = 0
         self.blank_lines_before_comment = 0
         self.tokens = []
         parens = 0
-        for token in tokenize.generate_tokens(self.readline_check_physical):
-            if options.verbose >= 3:
-                if token[2][0] == token[3][0]:
-                    pos = '[%s:%s]' % (token[2][1] or '', token[3][1])
-                else:
-                    pos = 'l.%s' % token[3][0]
-                print('l.%s\t%s\t%s\t%r' %
-                    (token[2][0], pos, tokenize.tok_name[token[0]], token[1]))
-            self.tokens.append(token)
-            token_type, text = token[0:2]
-            if token_type == tokenize.OP and text in '([{':
-                parens += 1
-            if token_type == tokenize.OP and text in '}])':
-                parens -= 1
-            if token_type == tokenize.NEWLINE and not parens:
-                self.check_logical()
-                self.blank_lines = 0
-                self.blank_lines_before_comment = 0
-                self.tokens = []
-            if token_type == tokenize.NL and not parens:
-                if len(self.tokens) <= 1:
-                    # The physical line contains only this token.
-                    self.blank_lines += 1
-                self.tokens = []
-            if token_type == tokenize.COMMENT:
-                source_line = token[4]
-                token_start = token[2][1]
-                if source_line[:token_start].strip() == '':
-                    self.blank_lines_before_comment = max(self.blank_lines,
-                        self.blank_lines_before_comment)
-                    self.blank_lines = 0
-                if text.endswith('\n') and not parens:
-                    # The comment also ends a physical line. This works around
-                    # Python < 2.6 behaviour, which does not generate NL after
-                    # a comment which is on a line by itself.
-                    self.tokens = []
+        try:
+            for token in tokenize.generate_tokens(self.readline_check_physical):
+                if options.verbose >= 3:
+                    if token[2][0] == token[3][0]:
+                        pos = '[%s:%s]' % (token[2][1] or '', token[3][1])
+                    else:
+                        pos = 'l.%s' % token[3][0]
+                    print('l.%s\t%s\t%s\t%r' %
+                        (token[2][0], pos, tokenize.tok_name[token[0]], token[1]))
+                self.tokens.append(token)
+                token_type, text = token[0:2]
+                if token_type == tokenize.OP and text in '([{':
+                    parens += 1
+                if token_type == tokenize.OP and text in '}])':
+                    parens -= 1
+                if token_type == tokenize.NEWLINE and not parens:
+                    self.check_logical()
+                    self.blank_lines = 0
+                    self.blank_lines_before_comment = 0
+                    self.tokens = []
+                if token_type == tokenize.NL and not parens:
+                    if len(self.tokens) <= 1:
+                        # The physical line contains only this token.
+                        self.blank_lines += 1
+                    self.tokens = []
+                if token_type == tokenize.COMMENT:
+                    source_line = token[4]
+                    token_start = token[2][1]
+                    if source_line[:token_start].strip() == '':
+                        self.blank_lines_before_comment = max(self.blank_lines,
+                            self.blank_lines_before_comment)
+                        self.blank_lines = 0
+                    if text.endswith('\n') and not parens:
+                        # The comment also ends a physical line. This works around
+                        # Python < 2.6 behaviour, which does not generate NL after
+                        # a comment which is on a line by itself.
+                        self.tokens = []
+        except tokenize.TokenError as err:
+            msg, (lnum, pos) = err.args
+            self.report_error_args(lnum, pos, "E901", "TokenError", msg)
         return self.file_errors
 
     def report_error(self, line_number, offset, text, check):
         """
         Report an error, according to options.
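The main hunk above is the substance of this changeset: tokenize.generate_tokens() raises tokenize.TokenError when it reaches end of file inside an unterminated construct, and previously that exception escaped the checker; it is now reported as E901 instead of aborting the run. A standalone sketch of the behaviour being handled, assuming the classic CPython convention that TokenError.args is (message, (line, column)), which is exactly how the patch unpacks it:

import io
import tokenize

source = '"""unterminated docstring\n'  # EOF inside a multi-line string
try:
    for token in tokenize.generate_tokens(io.StringIO(source).readline):
        print(tokenize.tok_name[token[0]], repr(token[1]))
except tokenize.TokenError as err:
    # Same unpacking as the patch: message plus (line, column) of the error.
    msg, (lnum, pos) = err.args
    print("{0}:{1}: E901 Token Error: {2}".format(lnum, pos, msg))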
