        self.previous_logical = ''
        self.blank_lines = 0
        self.blank_lines_before_comment = 0
        self.tokens = []
        parens = 0
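        # Each token from tokenize.generate_tokens() is a 5-tuple:
        # (type, string, (start_row, start_col), (end_row, end_col), line);
        # the token[2]/token[3] indexing below reads those positions.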
        try:
            for token in tokenize.generate_tokens(self.readline_check_physical):
                if options.verbose >= 3:
                    if token[2][0] == token[3][0]:
                        pos = '[%s:%s]' % (token[2][1] or '', token[3][1])
                    else:
                        pos = 'l.%s' % token[3][0]
                    print('l.%s\t%s\t%s\t%r' %
                          (token[2][0], pos, tokenize.tok_name[token[0]], token[1]))
                self.tokens.append(token)
                token_type, text = token[0:2]
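                # Track bracket depth: newline tokens inside (), [] or {}
                # do not end the current logical line.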
                if token_type == tokenize.OP and text in '([{':
                    parens += 1
                if token_type == tokenize.OP and text in '}])':
                    parens -= 1
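                # A NEWLINE outside brackets ends a logical line: run the
                # logical-line checks, then reset the counters and the buffer.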
                if token_type == tokenize.NEWLINE and not parens:
                    self.check_logical()
                    self.blank_lines = 0
                    self.blank_lines_before_comment = 0
                    self.tokens = []
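                # An NL token outside brackets marks a physical line with no
                # logical content, i.e. a blank line.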
                if token_type == tokenize.NL and not parens:
                    if len(self.tokens) <= 1:
                        # The physical line contains only this token.
                        self.blank_lines += 1
                    self.tokens = []
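                # For a comment on a line of its own, fold the blank lines
                # seen so far into blank_lines_before_comment so that later
                # blank-line checks can still see them.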
                if token_type == tokenize.COMMENT:
                    source_line = token[4]
                    token_start = token[2][1]
                    if source_line[:token_start].strip() == '':
                        self.blank_lines_before_comment = max(self.blank_lines,
                            self.blank_lines_before_comment)
                        self.blank_lines = 0
                    if text.endswith('\n') and not parens:
                        # The comment also ends a physical line. This works around
                        # Python < 2.6 behaviour, which does not generate NL after
                        # a comment which is on a line by itself.
                        self.tokens = []
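        # tokenize raises TokenError when EOF is reached inside an unclosed
        # bracket or an unterminated multi-line string; report it as E901
        # instead of letting the checker crash.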
        except tokenize.TokenError, err:
            msg, (lnum, pos) = err.args
            self.report_error_args(lnum, pos, "E901", "TokenError", msg)
        return self.file_errors

    def report_error(self, line_number, offset, text, check):
        """
        Report an error, according to options.