Python token.type Examples

The following are 30 code examples of token.type, the attribute that identifies a token's kind. They are drawn from open-source projects; you can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module token, or try the search function.
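As a quick orientation before the project examples, here is a minimal, self-contained sketch that tokenizes a line of source with the standard library and reads each token's type via the token module:

import io
import token
import tokenize

source = "total = price * 2\n"

# generate_tokens takes a str readline; each TokenInfo carries .type
# (an int constant from the token module), .string, .start, .end, .line.
for tok in tokenize.generate_tokens(io.StringIO(source).readline):
    print(tok.type, token.tok_name[tok.type], repr(tok.string))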
Example #1
Source File: annotate.py    From pasta with Apache License 2.0
def visit_ExceptHandler(self, node):
    self.token('except')
    if node.type:
      self.visit(node.type)
    if node.type and node.name:
      self.attr(node, 'as', [self.ws, self.one_of_symbols("as", ","), self.ws],
                default=' as ')
    if node.name:
      if isinstance(node.name, ast.AST):
        self.visit(node.name)
      else:
        self.token(node.name)
    self.attr(node, 'open_block', [self.ws, ':', self.ws_oneline],
              default=':\n')
    for stmt in self.indented(node, 'body'):
      self.visit(stmt) 
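To see visit_ExceptHandler in context: pasta annotates the AST so that the original formatting survives a parse/dump round trip. A minimal sketch, assuming pasta's top-level parse/dump API:

import pasta

src = (
    "try:\n"
    "    risky()\n"
    "except ValueError as err:  # keep this comment\n"
    "    handle(err)\n"
)
tree = pasta.parse(src)
# dump should reproduce the source, including the comment and spacing,
# because visitors like visit_ExceptHandler recorded the original tokens.
assert pasta.dump(tree) == src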
Example #2
Source File: annotate.py    From pasta with Apache License 2.0
def visit_Sub(self, node):
    self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0]) 
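Examples #2 through #9 (and #13 through #20 below) all follow the same pattern: look up the operator node's type in ast_constants.NODE_TYPE_TO_TOKENS and emit its first token. A hypothetical illustration of the kind of mapping this assumes (the real table lives in pasta's ast_constants module):

import ast

# Hypothetical excerpt of a NODE_TYPE_TO_TOKENS-style table: each AST
# operator type maps to the token(s) that can spell it in source.
NODE_TYPE_TO_TOKENS = {
    ast.Sub: ('-',),
    ast.BitXor: ('^',),
    ast.Lt: ('<',),
    ast.In: ('in',),
}

node = ast.Sub()
print(NODE_TYPE_TO_TOKENS[type(node)][0])  # -> '-'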
Example #3
Source File: annotate.py    From pasta with Apache License 2.0
def visit_BitXor(self, node):
    self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0]) 
Example #4
Source File: annotate.py    From pasta with Apache License 2.0
def visit_BitAnd(self, node):
    self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0]) 
Example #5
Source File: annotate.py    From pasta with Apache License 2.0
def visit_Lt(self, node):
    self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0]) 
Example #6
Source File: annotate.py    From pasta with Apache License 2.0
def visit_LtE(self, node):
    self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0]) 
Example #7
Source File: annotate.py    From pasta with Apache License 2.0
def visit_GtE(self, node):
    self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0]) 
Example #8
Source File: annotate.py    From pasta with Apache License 2.0
def visit_Is(self, node):
    self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0]) 
Example #9
Source File: annotate.py    From pasta with Apache License 2.0
def visit_In(self, node):
    self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0]) 
Example #10
Source File: annotate.py    From pasta with Apache License 2.0
def indented(self, node, children_attr):
    """Generator which annotates child nodes with their indentation level."""
    children = getattr(node, children_attr)
    cur_loc = self.tokens._loc
    next_loc = self.tokens.peek_non_whitespace().start
    # Special case: if the children are on the same line, then there is no
    # indentation level to track.
    if cur_loc[0] == next_loc[0]:
      indent_diff = self._indent_diff
      self._indent_diff = None
      for child in children:
        yield child
      self._indent_diff = indent_diff
      return

    prev_indent = self._indent
    prev_indent_diff = self._indent_diff

    # Find the indent level of the first child
    indent_token = self.tokens.peek_conditional(
        lambda t: t.type == token_generator.TOKENS.INDENT)
    new_indent = indent_token.src
    new_diff = _get_indent_diff(prev_indent, new_indent)
    if not new_diff:
      new_diff = ' ' * 4  # Sensible default
      print('Indent detection failed (line %d); inner indentation level is not '
            'more than the outer indentation.' % cur_loc[0], file=sys.stderr)

    # Set the indent level to the child's indent and iterate over the children
    self._indent = new_indent
    self._indent_diff = new_diff
    for child in children:
      yield child
    # Store the suffix at this indentation level, which could be many lines
    fmt.set(node, 'block_suffix_%s' % children_attr,
            self.tokens.block_whitespace(self._indent))

    # Dedent back to the previous level
    self._indent = prev_indent
    self._indent_diff = prev_indent_diff 
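The helper _get_indent_diff is not shown in this excerpt; conceptually it returns the extra whitespace the inner block adds on top of the outer indent. A hypothetical sketch of that idea, not pasta's actual implementation:

def _get_indent_diff(outer, inner):
    # If the inner indent extends the outer indent, the diff is the suffix;
    # otherwise indentation detection failed and we return None, which
    # triggers the "Sensible default" branch above.
    if inner.startswith(outer) and len(inner) > len(outer):
        return inner[len(outer):]
    return None

print(repr(_get_indent_diff('    ', '        ')))  # -> '    '
print(repr(_get_indent_diff('    ', '  ')))        # -> None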
Example #11
Source File: annotate.py    From pasta with Apache License 2.0
def ws(self, max_lines=None, semicolon=False, comment=True):
    """Parse some whitespace from the source tokens and return it."""
    next_token = self.tokens.peek()
    if semicolon and next_token and next_token.src == ';':
      result = self.tokens.whitespace() + self.token(';')
      next_token = self.tokens.peek()
      if next_token.type in (token_generator.TOKENS.NL,
                             token_generator.TOKENS.NEWLINE):
        result += self.tokens.whitespace(max_lines=1)
      return result
    return self.tokens.whitespace(max_lines=max_lines, comment=comment) 
Example #12
Source File: annotate.py    From pasta with Apache License 2.0
def _optional_token(self, token_type, token_val):
    token = self.tokens.peek()
    if not token or token.type != token_type or token.src != token_val:
      return ''
    else:
      self.tokens.next()
      return token.src + self.ws() 
Example #13
Source File: annotate.py    From pasta with Apache License 2.0
def visit_Eq(self, node):
    self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0]) 
Example #14
Source File: annotate.py    From pasta with Apache License 2.0
def visit_Mod(self, node):
    self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0]) 
Example #15
Source File: annotate.py    From pasta with Apache License 2.0
def visit_MatMult(self, node):
    self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0]) 
Example #16
Source File: annotate.py    From pasta with Apache License 2.0
def visit_Div(self, node):
    self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0]) 
Example #17
Source File: annotate.py    From pasta with Apache License 2.0
def visit_Pow(self, node):
    self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0]) 
Example #18
Source File: annotate.py    From pasta with Apache License 2.0
def visit_Add(self, node):
    self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0]) 
Example #19
Source File: annotate.py    From pasta with Apache License 2.0
def visit_Or(self, node):
    self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0]) 
Example #20
Source File: annotate.py    From pasta with Apache License 2.0
def visit_And(self, node):
    self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0]) 
Example #21
Source File: annotate.py    From pasta with Apache License 2.0
def visit_UnaryOp(self, node):
    op_symbol = ast_constants.NODE_TYPE_TO_TOKENS[type(node.op)][0]
    self.attr(node, 'op', [op_symbol, self.ws], default=op_symbol, deps=('op',))
    self.visit(node.operand)

  # ============================================================================
  # == OPERATORS AND TOKENS: Anything that's just whitespace and tokens       ==
  # ============================================================================ 
Example #22
Source File: annotate.py    From pasta with Apache License 2.0
def visit_BinOp(self, node):
    op_symbol = ast_constants.NODE_TYPE_TO_TOKENS[type(node.op)][0]
    self.visit(node.left)
    self.attr(node, 'op', [self.ws, op_symbol, self.ws],
              default=' %s ' % op_symbol, deps=('op',))
    self.visit(node.right) 
Example #23
Source File: annotate.py    From pasta with Apache License 2.0
def visit_AugAssign(self, node):
    self.visit(node.target)
    op_token = '%s=' % ast_constants.NODE_TYPE_TO_TOKENS[type(node.op)][0]
    self.attr(node, 'operator', [self.ws, op_token, self.ws],
              default=' %s ' % op_token)
    self.visit(node.value) 
Example #24
Source File: annotate.py    From pasta with Apache License 2.0
def visit_Raise(self, node):
    if hasattr(node, 'cause'):
      return self.visit_Raise_3(node)

    self.token('raise')
    if node.type:
      self.attr(node, 'type_prefix', [self.ws], default=' ')
      self.visit(node.type)
    if node.inst:
      self.attr(node, 'inst_prefix', [self.ws, ',', self.ws], default=', ')
      self.visit(node.inst)
    if node.tback:
      self.attr(node, 'tback_prefix', [self.ws, ',', self.ws], default=', ')
      self.visit(node.tback) 
Example #25
Source File: annotate.py    From pasta with Apache License 2.0
def visit_Module(self, node):
    try:
      self.attr(
          node, 'bom',
          [lambda: self.tokens.eat_tokens(lambda t: t.type == token.ERRORTOKEN)],
          default='')
    except:
      pass
    self.generic_visit(node) 
Example #26
Source File: tokenize.py    From Project-New-Reign---Nemesis-Main with GNU General Public License v3.0
def tokenize(readline):
    """
    The tokenize() generator requires one argument, readline, which
    must be a callable object which provides the same interface as the
    readline() method of built-in file objects.  Each call to the function
    should return one line of input as bytes.  Alternatively, readline
    can be a callable function terminating with StopIteration:
        readline = open(myfile, 'rb').__next__  # Example of alternate readline

    The generator produces 5-tuples with these members: the token type; the
    token string; a 2-tuple (srow, scol) of ints specifying the row and
    column where the token begins in the source; a 2-tuple (erow, ecol) of
    ints specifying the row and column where the token ends in the source;
    and the line on which the token was found.  The line passed is the
    logical line; continuation lines are included.

    The first token sequence will always be an ENCODING token
    which tells you which encoding was used to decode the bytes stream.
    """
    # This import is here to avoid problems when the itertools module is not
    # built yet and tokenize is imported.
    from itertools import chain, repeat
    encoding, consumed = detect_encoding(readline)
    rl_gen = iter(readline, b"")
    empty = repeat(b"")
    return _tokenize(chain(consumed, rl_gen, empty).__next__, encoding) 
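A short usage sketch: because tokenize() reads bytes, pass it a binary readline; the first token it yields is the ENCODING token described in the docstring above.

import io
import tokenize

data = b"x = 1\n"
tokens = list(tokenize.tokenize(io.BytesIO(data).readline))
print(tokens[0].type == tokenize.ENCODING)  # True
print(tokens[0].string)                     # 'utf-8'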
Example #27
Source File: tokenize.py    From Project-New-Reign---Nemesis-Main with GNU General Public License v3.0
def exact_type(self):
    if self.type == OP and self.string in EXACT_TOKEN_TYPES:
        return EXACT_TOKEN_TYPES[self.string]
    else:
        return self.type
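In the standard library this lives on tokenize.TokenInfo as the exact_type property: generic OP tokens are narrowed to their exact operator constant, and everything else passes through unchanged. For example:

import io
import token
import tokenize

for tok in tokenize.generate_tokens(io.StringIO("a + b\n").readline):
    if tok.type == token.OP:
        # .type stays the generic OP; .exact_type names the operator.
        print(token.tok_name[tok.type], '->', token.tok_name[tok.exact_type])
        # prints: OP -> PLUS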
Example #28
Source File: tokenize.py    From Project-New-Reign---Nemesis-Main with GNU General Public License v3.0
def __repr__(self):
    annotated_type = '%d (%s)' % (self.type, tok_name[self.type])
    return ('TokenInfo(type=%s, string=%r, start=%r, end=%r, line=%r)' %
            self._replace(type=annotated_type))
Example #29
Source File: util.py    From asttokens with Apache License 2.0
def get(self, obj, cls):
    """
    Using the lowercase name of the class as node_type, returns `obj.visit_{node_type}`,
    or `obj.visit_default` if the type-specific method is not found.
    """
    method = self._cache.get(cls)
    if not method:
      name = "visit_" + cls.__name__.lower()
      method = getattr(obj, name, obj.visit_default)
      self._cache[cls] = method
    return method 
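A usage sketch of this dispatch cache, with a self-contained rendering of the surrounding class and a hypothetical visitor (Printer, visit_name, and visit_default are made up for illustration):

import ast

class NodeMethods:
    """Caches obj.visit_<classname> lookups, as in get() above."""
    def __init__(self):
        self._cache = {}

    def get(self, obj, cls):
        method = self._cache.get(cls)
        if not method:
            method = getattr(obj, "visit_" + cls.__name__.lower(), obj.visit_default)
            self._cache[cls] = method
        return method

class Printer:
    def visit_name(self, node):    # matched for ast.Name via the lowercase class name
        return "name"
    def visit_default(self, node): # fallback for everything else
        return "default"

methods = NodeMethods()
printer = Printer()
node = ast.parse("x").body[0].value           # an ast.Name node
print(methods.get(printer, type(node))(node)) # -> 'name'
print(methods.get(printer, ast.Load)(None))   # -> 'default'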
Example #30
Source File: util.py    From asttokens with Apache License 2.0
def expect_token(token, tok_type, tok_str=None):
  """
  Verifies that the given token is of the expected type. If tok_str is given, the token string
  is verified too. If the token doesn't match, raises an informative ValueError.
  """
  if not match_token(token, tok_type, tok_str):
    raise ValueError("Expected token %s, got %s on line %s col %s" % (
      token_repr(tok_type, tok_str), str(token),
      token.start[0], token.start[1] + 1))

# These were previously defined in tokenize.py and distinguishable by being greater than
# token.N_TOKENS. As of python3.7, they are in token.py, and we check for them explicitly.
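A brief sketch of calling expect_token with tokens from the standard tokenizer; it assumes the function is importable from asttokens.util, alongside the match_token helper it relies on:

import io
import token
import tokenize
from asttokens.util import expect_token  # the function shown above

toks = list(tokenize.generate_tokens(io.StringIO("x = 1\n").readline))
expect_token(toks[0], token.NAME)      # 'x' is a NAME: passes silently
expect_token(toks[1], token.OP, '=')   # type and string both match: passes
expect_token(toks[0], token.NUMBER)    # mismatch: raises an informative ValueError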