Python tokenize.tok_name Examples
The following are 17 code examples of tokenize.tok_name, the dictionary in the standard-library tokenize module that maps integer token types to their printable names. Each example is taken from the project and source file named above it. You may also want to check out the other available functions and classes of the tokenize module.
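Before the examples, a minimal sketch of the lookup itself, using only the standard library: tokenize produces TokenInfo tuples whose .type field is an integer, and tok_name turns that integer into a readable name.

import io
import tokenize

# tok_name maps integer token types to readable names.
source = "x = 1  # answer\n"
for tok in tokenize.generate_tokens(io.StringIO(source).readline):
    print(tokenize.tok_name[tok.type], repr(tok.string))
# NAME 'x'
# OP '='
# NUMBER '1'
# COMMENT '# answer'
# NEWLINE '\n'
# ENDMARKER ''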
Example #1
Source File: base.py From wemake-python-styleguide with MIT License
def visit(self, token: tokenize.TokenInfo) -> None:
    """
    Runs custom defined handlers in a visitor for each specific token type.

    Uses ``.exact_type`` property to fetch the token name.
    So, you have to be extra careful with tokens
    like ``->`` and other operators,
    since they might resolve in just ``OP`` name.

    Does nothing if handler for any token type is not defined.

    Inspired by ``NodeVisitor`` class.

    See also:
        https://docs.python.org/3/library/tokenize.html

    """
    token_type = tokenize.tok_name[token.exact_type].lower()
    method = getattr(self, 'visit_{0}'.format(token_type), None)
    if method is not None:
        method(token)
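The docstring's caveat about operators can be seen directly: for an operator token, .type is the generic OP, while .exact_type identifies the specific operator, and tok_name names both. A small illustration (not from the project above):

import io
import tokenize

# .type collapses every operator to OP; .exact_type stays specific.
for tok in tokenize.generate_tokens(io.StringIO("def f() -> int: pass\n").readline):
    if tok.type == tokenize.OP:
        print(tokenize.tok_name[tok.type], tokenize.tok_name[tok.exact_type], repr(tok.string))
# OP LPAR '('
# OP RPAR ')'
# OP RARROW '->'
# OP COLON ':'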
Example #2
Source File: pythondoc.py From InternationalizationScript-iOS with MIT License
def handle_token(self, *args):
    # dispatch incoming tokens to the current handler
    # (Python 2 source: print statements, apply(), and im_func)
    if DEBUG > 1:
        print self.handler.im_func.func_name, self.indent,
        print tokenize.tok_name[args[0]], repr(args[1])
    if args[0] == tokenize.DEDENT:
        self.indent = self.indent - 1
        while self.scope and self.scope[-1][0] >= self.indent:
            del self.scope[-1]
            del self.stack[-1]
    self.handler = apply(self.handler, args)
    if args[0] == tokenize.INDENT:
        self.indent = self.indent + 1

##
# (Token handler) Scans for encoding directive.
Example #3
Source File: recipe-511473.py From code with MIT License
def _error(tok, msg):
    raise SyntaxError("%s token(%s, '%s') at %s = from/to [row, col]" % (
        msg, tokenize.tok_name[tok[0]], tok[1], str(tok[2:4])))
Example #4
Source File: token_generator.py From pasta with Apache License 2.0
def next_of_type(self, token_type):
    """Parse a token of the given type and return it."""
    token = self.next()
    if token.type != token_type:
        raise ValueError("Expected %r but found %r\nline %d: %s" % (
            tokenize.tok_name[token_type], token.src, token.start[0],
            self.lines[token.start[0] - 1]))
    return token
Example #5
Source File: processor.py From blackmamba with MIT License
def log_token(log, token):
    """Log a token to a provided logging object."""
    if token[2][0] == token[3][0]:
        pos = '[%s:%s]' % (token[2][1] or '', token[3][1])
    else:
        pos = 'l.%s' % token[3][0]
    log.log(flake8._EXTRA_VERBOSE, 'l.%s\t%s\t%s\t%r' %
            (token[2][0], pos, tokenize.tok_name[token[0]], token[1]))


# NOTE(sigmavirus24): This was taken wholesale from
# https://github.com/PyCQA/pycodestyle
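The token[2] and token[3] indexing above relies on TokenInfo being a named tuple: index 0 is the type, 1 the text, 2 and 3 the (row, column) start and end positions. A standalone check of those fields:

import io
import tokenize

# TokenInfo fields by index: type, string, start, end, line.
tok = next(tokenize.generate_tokens(io.StringIO("answer = 42\n").readline))
print(tokenize.tok_name[tok[0]], repr(tok[1]), tok[2], tok[3])
# NAME 'answer' (1, 0) (1, 6)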
Example #6
Source File: processor.py From linter-pylama with MIT License
def log_token(log, token):
    """Log a token to a provided logging object."""
    if token[2][0] == token[3][0]:
        pos = '[%s:%s]' % (token[2][1] or '', token[3][1])
    else:
        pos = 'l.%s' % token[3][0]
    log.log(flake8._EXTRA_VERBOSE, 'l.%s\t%s\t%s\t%r' %
            (token[2][0], pos, tokenize.tok_name[token[0]], token[1]))


# NOTE(sigmavirus24): This was taken wholesale from
# https://github.com/PyCQA/pycodestyle
Example #7
Source File: QuestParser.py From Pirates-Online-Rewritten with BSD 3-Clause "New" or "Revised" License
def getLineOfTokens(gen):
    tokens = []
    nextNeg = 0
    token = gen.next()  # Python 2 generator protocol; next(gen) in Python 3
    if token[0] == tokenize.ENDMARKER:
        return None
    while token[0] != tokenize.NEWLINE and token[0] != tokenize.NL:
        if token[0] == tokenize.COMMENT:
            pass
        elif token[0] == tokenize.OP and token[1] == '-':
            nextNeg = 1
        elif token[0] == tokenize.NUMBER:
            if nextNeg:
                tokens.append(-eval(token[1]))
                nextNeg = 0
            else:
                tokens.append(eval(token[1]))
        elif token[0] == tokenize.STRING:
            tokens.append(eval(token[1]))
        elif token[0] == tokenize.NAME:
            tokens.append(token[1])
        else:
            notify.warning('Ignored token type: %s on line: %s' % (
                tokenize.tok_name[token[0]], token[2][0]))
        token = gen.next()
    return tokens
Example #8
Source File: checks.py From networking-odl with Apache License 2.0
def _check_opendaylight_lowercase(logical_line, tokens, noqa, token_type):
    """ND01 - Enforce using OpenDaylight in given token."""
    if noqa:
        return

    for _token_type, text, start_index, _, _ in tokens:
        if _token_type == token_type:
            pos = text.find(_ND01_OPENDAYLIGHT)
            if pos >= 0:
                msg = "{} in {}".format(
                    _ND01_MSG, tokenize.tok_name[token_type].lower())
                yield (start_index[1] + pos, msg)
Example #9
Source File: parser.py From YAPyPy with MIT License
def to_rbnf_token(tk: tokenize.TokenInfo) -> Tokenizer:
    name = cast(tokenize.tok_name[tk.type])
    if name == 'NAME' and tk.string in kwlist:
        value = cast(tk.string)
        name = cast('KEYWORD')
    else:
        value = cast(tk.string) if name not in ('NAME', 'STRING', 'NUMBER') else tk.string
    return Tokenizer(name, value, *tk.start)
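The NAME check above exists because the tokenizer reports keywords as ordinary NAME tokens; kwlist (from the standard keyword module) is what separates them. A sketch of the same idea without the project's cast/Tokenizer helpers:

import io
import keyword
import tokenize

# Keywords arrive as NAME tokens; keyword.kwlist tells them apart.
for tok in tokenize.generate_tokens(io.StringIO("while flag: pass\n").readline):
    name = tokenize.tok_name[tok.type]
    if name == 'NAME' and tok.string in keyword.kwlist:
        name = 'KEYWORD'
    print(name, repr(tok.string))
# KEYWORD 'while'
# NAME 'flag'
# OP ':'
# KEYWORD 'pass'
# NEWLINE '\n'
# ENDMARKER ''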
Example #10
Source File: parser.py From linter-pylama with MIT License
def __repr__(self):
    return "tk.{}".format(tk.tok_name[self])
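Indexing tok_name with self only works if the enclosing class is an int subclass whose value is a token type; a minimal sketch under that assumption (the TokenType name here is hypothetical, and tk is assumed to be tokenize imported under an alias):

import tokenize as tk

# Assumed context: an int subclass carrying a token-type value.
class TokenType(int):
    def __repr__(self):
        return "tk.{}".format(tk.tok_name[self])

print(repr(TokenType(tk.NAME)))  # tk.NAME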
Example #11
Source File: pycodestyle.py From PyDev.Debugger with Eclipse Public License 1.0
def check_all(self, expected=None, line_offset=0):
    """Run all checks on the input file."""
    self.report.init_file(self.filename, self.lines, expected, line_offset)
    self.total_lines = len(self.lines)
    if self._ast_checks:
        self.check_ast()
    self.line_number = 0
    self.indent_char = None
    self.indent_level = self.previous_indent_level = 0
    self.previous_logical = ''
    self.previous_unindented_logical_line = ''
    self.tokens = []
    self.blank_lines = self.blank_before = 0
    parens = 0
    for token in self.generate_tokens():
        self.tokens.append(token)
        token_type, text = token[0:2]
        if self.verbose >= 3:
            if token[2][0] == token[3][0]:
                pos = '[%s:%s]' % (token[2][1] or '', token[3][1])
            else:
                pos = 'l.%s' % token[3][0]
            print('l.%s\t%s\t%s\t%r' %
                  (token[2][0], pos, tokenize.tok_name[token[0]], text))
        if token_type == tokenize.OP:
            if text in '([{':
                parens += 1
            elif text in '}])':
                parens -= 1
        elif not parens:
            if token_type in NEWLINE:
                if token_type == tokenize.NEWLINE:
                    self.check_logical()
                    self.blank_before = 0
                elif len(self.tokens) == 1:
                    # The physical line contains only this token.
                    self.blank_lines += 1
                    del self.tokens[0]
                else:
                    self.check_logical()
            elif COMMENT_WITH_NL and token_type == tokenize.COMMENT:
                if len(self.tokens) == 1:
                    # The comment also ends a physical line
                    token = list(token)
                    token[1] = text.rstrip('\r\n')
                    token[3] = (token[2][0], token[2][1] + len(token[1]))
                    self.tokens = [tuple(token)]
                    self.check_logical()
    if self.tokens:
        self.check_physical(self.lines[-1])
        self.check_logical()
    return self.report.get_file_results()
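The verbose branch prints one dump line per token: physical line number, a start/end position marker, the tok_name lookup, and the raw text. The same format can be reproduced outside pycodestyle (this standalone loop is an illustration, not pycodestyle's API):

import io
import tokenize

# Reproduce the debug dump format: line, position, token name, text.
source = "x = (1 +\n     2)\n"
for token in tokenize.generate_tokens(io.StringIO(source).readline):
    if token[2][0] == token[3][0]:
        pos = '[%s:%s]' % (token[2][1] or '', token[3][1])
    else:
        pos = 'l.%s' % token[3][0]
    print('l.%s\t%s\t%s\t%r' %
          (token[2][0], pos, tokenize.tok_name[token[0]], token[1]))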
Example #12
Source File: pep8.py From cadquery-freecad-module with GNU Lesser General Public License v3.0
def check_all(self, expected=None, line_offset=0):
    """Run all checks on the input file."""
    self.report.init_file(self.filename, self.lines, expected, line_offset)
    self.total_lines = len(self.lines)
    if self._ast_checks:
        self.check_ast()
    self.line_number = 0
    self.indent_char = None
    self.indent_level = self.previous_indent_level = 0
    self.previous_logical = ''
    self.tokens = []
    self.blank_lines = self.blank_before = 0
    parens = 0
    for token in self.generate_tokens():
        self.tokens.append(token)
        token_type, text = token[0:2]
        if self.verbose >= 3:
            if token[2][0] == token[3][0]:
                pos = '[%s:%s]' % (token[2][1] or '', token[3][1])
            else:
                pos = 'l.%s' % token[3][0]
            print('l.%s\t%s\t%s\t%r' %
                  (token[2][0], pos, tokenize.tok_name[token[0]], text))
        if token_type == tokenize.OP:
            if text in '([{':
                parens += 1
            elif text in '}])':
                parens -= 1
        elif not parens:
            if token_type in NEWLINE:
                if token_type == tokenize.NEWLINE:
                    self.check_logical()
                    self.blank_before = 0
                elif len(self.tokens) == 1:
                    # The physical line contains only this token.
                    self.blank_lines += 1
                    del self.tokens[0]
                else:
                    self.check_logical()
            elif COMMENT_WITH_NL and token_type == tokenize.COMMENT:
                if len(self.tokens) == 1:
                    # The comment also ends a physical line
                    token = list(token)
                    token[1] = text.rstrip('\r\n')
                    token[3] = (token[2][0], token[2][1] + len(token[1]))
                    self.tokens = [tuple(token)]
                    self.check_logical()
    if self.tokens:
        self.check_physical(self.lines[-1])
        self.check_logical()
    return self.report.get_file_results()
Example #13
Source File: pycodestyle.py From python-netsurv with MIT License
def check_all(self, expected=None, line_offset=0):
    """Run all checks on the input file."""
    self.report.init_file(self.filename, self.lines, expected, line_offset)
    self.total_lines = len(self.lines)
    if self._ast_checks:
        self.check_ast()
    self.line_number = 0
    self.indent_char = None
    self.indent_level = self.previous_indent_level = 0
    self.previous_logical = ''
    self.previous_unindented_logical_line = ''
    self.tokens = []
    self.blank_lines = self.blank_before = 0
    parens = 0
    for token in self.generate_tokens():
        self.tokens.append(token)
        token_type, text = token[0:2]
        if self.verbose >= 3:
            if token[2][0] == token[3][0]:
                pos = '[%s:%s]' % (token[2][1] or '', token[3][1])
            else:
                pos = 'l.%s' % token[3][0]
            print('l.%s\t%s\t%s\t%r' %
                  (token[2][0], pos, tokenize.tok_name[token[0]], text))
        if token_type == tokenize.OP:
            if text in '([{':
                parens += 1
            elif text in '}])':
                parens -= 1
        elif not parens:
            if token_type in NEWLINE:
                if token_type == tokenize.NEWLINE:
                    self.check_logical()
                    self.blank_before = 0
                elif len(self.tokens) == 1:
                    # The physical line contains only this token.
                    self.blank_lines += 1
                    del self.tokens[0]
                else:
                    self.check_logical()
    if self.tokens:
        self.check_physical(self.lines[-1])
        self.check_logical()
    return self.report.get_file_results()
Example #14
Source File: pycodestyle.py From blackmamba with MIT License
def check_all(self, expected=None, line_offset=0):
    """Run all checks on the input file."""
    self.report.init_file(self.filename, self.lines, expected, line_offset)
    self.total_lines = len(self.lines)
    if self._ast_checks:
        self.check_ast()
    self.line_number = 0
    self.indent_char = None
    self.indent_level = self.previous_indent_level = 0
    self.previous_logical = ''
    self.previous_unindented_logical_line = ''
    self.tokens = []
    self.blank_lines = self.blank_before = 0
    parens = 0
    for token in self.generate_tokens():
        self.tokens.append(token)
        token_type, text = token[0:2]
        if self.verbose >= 3:
            if token[2][0] == token[3][0]:
                pos = '[%s:%s]' % (token[2][1] or '', token[3][1])
            else:
                pos = 'l.%s' % token[3][0]
            print('l.%s\t%s\t%s\t%r' %
                  (token[2][0], pos, tokenize.tok_name[token[0]], text))
        if token_type == tokenize.OP:
            if text in '([{':
                parens += 1
            elif text in '}])':
                parens -= 1
        elif not parens:
            if token_type in NEWLINE:
                if token_type == tokenize.NEWLINE:
                    self.check_logical()
                    self.blank_before = 0
                elif len(self.tokens) == 1:
                    # The physical line contains only this token.
                    self.blank_lines += 1
                    del self.tokens[0]
                else:
                    self.check_logical()
            elif COMMENT_WITH_NL and token_type == tokenize.COMMENT:
                if len(self.tokens) == 1:
                    # The comment also ends a physical line
                    token = list(token)
                    token[1] = text.rstrip('\r\n')
                    token[3] = (token[2][0], token[2][1] + len(token[1]))
                    self.tokens = [tuple(token)]
                    self.check_logical()
    if self.tokens:
        self.check_physical(self.lines[-1])
        self.check_logical()
    return self.report.get_file_results()
Example #15
Source File: pep8.py From TextDetector with GNU General Public License v3.0
def check_all(self, expected=None, line_offset=0):
    """
    Run all checks on the input file.
    """
    self.report.init_file(self.filename, self.lines, expected, line_offset)
    if self._ast_checks:
        self.check_ast()
    self.line_number = 0
    self.indent_char = None
    self.indent_level = 0
    self.previous_indent_level = 0
    self.previous_logical = ''
    self.tokens = []
    self.blank_lines = blank_lines_before_comment = 0
    parens = 0
    for token in self.generate_tokens():
        self.tokens.append(token)
        token_type, text = token[0:2]
        if self.verbose >= 3:
            if token[2][0] == token[3][0]:
                pos = '[%s:%s]' % (token[2][1] or '', token[3][1])
            else:
                pos = 'l.%s' % token[3][0]
            print('l.%s\t%s\t%s\t%r' %
                  (token[2][0], pos, tokenize.tok_name[token[0]], text))
        if token_type == tokenize.OP:
            if text in '([{':
                parens += 1
            elif text in '}])':
                parens -= 1
        elif not parens:
            if token_type == tokenize.NEWLINE:
                if self.blank_lines < blank_lines_before_comment:
                    self.blank_lines = blank_lines_before_comment
                self.check_logical()
                self.blank_lines = blank_lines_before_comment = 0
            elif token_type == tokenize.NL:
                if len(self.tokens) == 1:
                    # The physical line contains only this token.
                    self.blank_lines += 1
                    del self.tokens[0]
                else:
                    self.check_logical()
            elif token_type == tokenize.COMMENT and len(self.tokens) == 1:
                if blank_lines_before_comment < self.blank_lines:
                    blank_lines_before_comment = self.blank_lines
                self.blank_lines = 0
                if COMMENT_WITH_NL:
                    # The comment also ends a physical line
                    text = text.rstrip('\r\n')
                    self.tokens = [(token_type, text) + token[2:]]
                    self.check_logical()
    return self.report.get_file_results()
Example #16
Source File: pep8.py From filmkodi with Apache License 2.0
def check_all(self, expected=None, line_offset=0):
    """Run all checks on the input file."""
    self.report.init_file(self.filename, self.lines, expected, line_offset)
    self.total_lines = len(self.lines)
    if self._ast_checks:
        self.check_ast()
    self.line_number = 0
    self.indent_char = None
    self.indent_level = self.previous_indent_level = 0
    self.previous_logical = ''
    self.tokens = []
    self.blank_lines = self.blank_before = 0
    parens = 0
    for token in self.generate_tokens():
        self.tokens.append(token)
        token_type, text = token[0:2]
        if self.verbose >= 3:
            if token[2][0] == token[3][0]:
                pos = '[%s:%s]' % (token[2][1] or '', token[3][1])
            else:
                pos = 'l.%s' % token[3][0]
            print('l.%s\t%s\t%s\t%r' %
                  (token[2][0], pos, tokenize.tok_name[token[0]], text))
        if token_type == tokenize.OP:
            if text in '([{':
                parens += 1
            elif text in '}])':
                parens -= 1
        elif not parens:
            if token_type in NEWLINE:
                if token_type == tokenize.NEWLINE:
                    self.check_logical()
                    self.blank_before = 0
                elif len(self.tokens) == 1:
                    # The physical line contains only this token.
                    self.blank_lines += 1
                    del self.tokens[0]
                else:
                    self.check_logical()
            elif COMMENT_WITH_NL and token_type == tokenize.COMMENT:
                if len(self.tokens) == 1:
                    # The comment also ends a physical line
                    token = list(token)
                    token[1] = text.rstrip('\r\n')
                    token[3] = (token[2][0], token[2][1] + len(token[1]))
                    self.tokens = [tuple(token)]
                    self.check_logical()
    if self.tokens:
        self.check_physical(self.lines[-1])
        self.check_logical()
    return self.report.get_file_results()
Example #17
Source File: pycodestyle.py From linter-pylama with MIT License
def check_all(self, expected=None, line_offset=0):
    """Run all checks on the input file."""
    self.report.init_file(self.filename, self.lines, expected, line_offset)
    self.total_lines = len(self.lines)
    if self._ast_checks:
        self.check_ast()
    self.line_number = 0
    self.indent_char = None
    self.indent_level = self.previous_indent_level = 0
    self.previous_logical = ''
    self.previous_unindented_logical_line = ''
    self.tokens = []
    self.blank_lines = self.blank_before = 0
    parens = 0
    for token in self.generate_tokens():
        self.tokens.append(token)
        token_type, text = token[0:2]
        if self.verbose >= 3:
            if token[2][0] == token[3][0]:
                pos = '[%s:%s]' % (token[2][1] or '', token[3][1])
            else:
                pos = 'l.%s' % token[3][0]
            print('l.%s\t%s\t%s\t%r' %
                  (token[2][0], pos, tokenize.tok_name[token[0]], text))
        if token_type == tokenize.OP:
            if text in '([{':
                parens += 1
            elif text in '}])':
                parens -= 1
        elif not parens:
            if token_type in NEWLINE:
                if token_type == tokenize.NEWLINE:
                    self.check_logical()
                    self.blank_before = 0
                elif len(self.tokens) == 1:
                    # The physical line contains only this token.
                    self.blank_lines += 1
                    del self.tokens[0]
                else:
                    self.check_logical()
            elif COMMENT_WITH_NL and token_type == tokenize.COMMENT:
                if len(self.tokens) == 1:
                    # The comment also ends a physical line
                    token = list(token)
                    token[1] = text.rstrip('\r\n')
                    token[3] = (token[2][0], token[2][1] + len(token[1]))
                    self.tokens = [tuple(token)]
                    self.check_logical()
    if self.tokens:
        self.check_physical(self.lines[-1])
        self.check_logical()
    return self.report.get_file_results()