Python token.NT_OFFSET Examples
The following is a code example of token.NT_OFFSET. You can go to the original project or source file by following the link above the example. You may also want to check out the other available functions and classes of the token module.
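token.NT_OFFSET is an integer constant (256 in CPython) that marks the boundary between terminal token numbers and non-terminal grammar symbol numbers in parse trees produced by the deprecated parser module. A minimal sketch of the usual membership check, using only the standard library (describe_node_type is an illustrative helper, not part of the module):

import token

def describe_node_type(node_type):
    # Values below NT_OFFSET are terminal tokens (NAME, OP, NEWLINE, ...);
    # values at or above it are non-terminal grammar symbols.
    if node_type >= token.NT_OFFSET:
        return 'symbol'
    return token.tok_name.get(node_type, 'unknown')

print(describe_node_type(token.NAME))            # 'NAME'
print(token.ISTERMINAL(token.NAME))              # True
print(token.ISNONTERMINAL(token.NT_OFFSET + 1))  # True

token.ISTERMINAL and token.ISNONTERMINAL wrap exactly this comparison, so either form is equivalent.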
Example #1
Source File: snippet.py from the Jandroid project (BSD 3-Clause "New" or "Revised" License)
def _SnippetizeNode(node, tokens):
  # The parser module gives a syntax tree that discards comments,
  # non-terminating newlines, and whitespace information. Use the tokens given
  # by the tokenize module to annotate the syntax tree with the information
  # needed to exactly reproduce the original source code.
  node_type = node[0]

  if node_type >= token.NT_OFFSET:
    # Symbol.
    children = tuple(_SnippetizeNode(child, tokens) for child in node[1:])
    return Symbol(node_type, children)
  else:
    # Token.
    grabbed_tokens = []
    while tokens and (
        tokens[0].type == tokenize.COMMENT or tokens[0].type == tokenize.NL):
      grabbed_tokens.append(tokens.popleft())

    # parser has 2 NEWLINEs right before the end.
    # tokenize has 0 or 1 depending on if the file has one.
    # Create extra nodes without consuming tokens to account for this.
    if node_type == token.NEWLINE:
      for tok in tokens:
        if tok.type == token.ENDMARKER:
          return TokenSnippet(node_type, grabbed_tokens)
        if tok.type != token.DEDENT:
          break

    assert tokens[0].type == token.OP or node_type == tokens[0].type
    grabbed_tokens.append(tokens.popleft())
    return TokenSnippet(node_type, grabbed_tokens)
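The helper above expects a deque of tokenize tokens and a nested-tuple tree from the parser module, which was deprecated in Python 3.9 and removed in 3.10; the Symbol and TokenSnippet classes are defined elsewhere in snippet.py. A hedged sketch of a driver that could feed it on Python 3.9 or earlier (snippetize_source is a hypothetical name, not part of the original file):

import collections
import io
import parser    # Deprecated; removed in Python 3.10.
import tokenize

def snippetize_source(source):
    # Build the inputs _SnippetizeNode expects: a deque of tokenize tokens
    # and a nested-tuple parse tree whose first element is the node type.
    tokens = collections.deque(
        tokenize.generate_tokens(io.StringIO(source).readline))
    tree = parser.suite(source).totuple()
    return _SnippetizeNode(tree, tokens)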