Python token.ENDMARKER Examples
The following are 18 code examples of token.ENDMARKER, the token type that marks the end of a token stream.
You may also want to check out all available functions and classes of the token module.
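
token.ENDMARKER marks the end of a token stream: the tokenizer emits it exactly once, as the very last token. A minimal stdlib-only sketch of that invariant:

import io
import token
import tokenize

toks = list(tokenize.generate_tokens(io.StringIO("x = 1\n").readline))
print(token.tok_name[toks[-1].type])  # -> ENDMARKER
assert toks[-1].type == token.ENDMARKER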
Example #1
Source File: stencil.py From stencil with MIT License

def parse_kwargs(self):
    kwargs = {}
    tok = self.current
    while tok.exact_type != token.ENDMARKER:
        if tok.exact_type == token.NEWLINE:
            tok = self.next()
            continue
        if tok.exact_type != token.NAME:
            raise SyntaxError(f"Expected name, found {tok}")
        name = tok.string
        tok = self.next()
        if tok.exact_type != token.EQUAL:
            raise SyntaxError(f"Expected =, found {tok}")
        tok = self.next()
        kwargs[name] = self._parse()
        tok = self.next()
    return kwargs
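
Example #1 advances a hand-rolled token cursor (self.current / self.next()) until it reaches ENDMARKER. The same stop-on-ENDMARKER loop can be sketched with nothing but the standard tokenizer; the simplistic name=value grammar below is an illustrative stand-in, not stencil's actual implementation:

import io
import token
import tokenize

tokens = iter(tokenize.generate_tokens(io.StringIO("a=1 b=2").readline))
tok = next(tokens)
pairs = {}
while tok.exact_type != token.ENDMARKER:
    if tok.exact_type == token.NAME:
        name = tok.string
        assert next(tokens).exact_type == token.EQUAL  # consume '='
        pairs[name] = next(tokens).string              # consume the value
    tok = next(tokens)
print(pairs)  # {'a': '1', 'b': '2'}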
Example #2
Source File: transformer.py From Splunking-Crime with GNU Affero General Public License v3.0

def file_input(self, nodelist):
    doc = self.get_docstring(nodelist, symbol.file_input)
    if doc is not None:
        i = 1
    else:
        i = 0
    stmts = []
    for node in nodelist[i:]:
        if node[0] != token.ENDMARKER and node[0] != token.NEWLINE:
            self.com_append_stmt(stmts, node)
    return Module(doc, Stmt(stmts))
Example #3
Source File: transformer.py From CTFCrackTools with GNU General Public License v3.0

def file_input(self, nodelist):
    doc = self.get_docstring(nodelist, symbol.file_input)
    if doc is not None:
        i = 1
    else:
        i = 0
    stmts = []
    for node in nodelist[i:]:
        if node[0] != token.ENDMARKER and node[0] != token.NEWLINE:
            self.com_append_stmt(stmts, node)
    return Module(doc, Stmt(stmts))
Example #4
Source File: stencil.py From stencil with MIT License

def parse(s):
    p = Expression(s)
    result = p._parse()
    if p.current.exact_type not in (token.NEWLINE, token.ENDMARKER):
        raise SyntaxError(f"Parse ended unexpectedly: {p.current}")
    return result
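
The guard in parse (once the expression is consumed, only NEWLINE or ENDMARKER may remain) matches what the stdlib tokenizer produces for a well-formed single expression:

import io
import token
import tokenize

toks = list(tokenize.generate_tokens(io.StringIO("a.b\n").readline))
# A complete single expression leaves exactly these two tokens behind.
assert [t.type for t in toks[-2:]] == [token.NEWLINE, token.ENDMARKER]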
Example #5
Source File: transformer.py From PokemonGo-DesktopMap with MIT License

def file_input(self, nodelist):
    doc = self.get_docstring(nodelist, symbol.file_input)
    if doc is not None:
        i = 1
    else:
        i = 0
    stmts = []
    for node in nodelist[i:]:
        if node[0] != token.ENDMARKER and node[0] != token.NEWLINE:
            self.com_append_stmt(stmts, node)
    return Module(doc, Stmt(stmts))
Example #6
Source File: transformer.py From CTFCrackTools-V2 with GNU General Public License v3.0

def file_input(self, nodelist):
    doc = self.get_docstring(nodelist, symbol.file_input)
    if doc is not None:
        i = 1
    else:
        i = 0
    stmts = []
    for node in nodelist[i:]:
        if node[0] != token.ENDMARKER and node[0] != token.NEWLINE:
            self.com_append_stmt(stmts, node)
    return Module(doc, Stmt(stmts))
Example #7
Source File: transformer.py From medicare-demo with Apache License 2.0

def file_input(self, nodelist):
    doc = self.get_docstring(nodelist, symbol.file_input)
    if doc is not None:
        i = 1
    else:
        i = 0
    stmts = []
    for node in nodelist[i:]:
        if node[0] != token.ENDMARKER and node[0] != token.NEWLINE:
            self.com_append_stmt(stmts, node)
    return Module(doc, Stmt(stmts))
Example #8
Source File: test_mark_tokens.py From asttokens with Apache License 2.0

def create_mark_checker(self, source, verify=True):
    atok = self.create_asttokens(source)
    checker = tools.MarkChecker(atok)

    # The last token should always be an ENDMARKER
    # None of the nodes should contain that token
    assert atok.tokens[-1].type == token.ENDMARKER
    if atok.text:  # except for empty files
        for node in checker.all_nodes:
            assert node.last_token.type != token.ENDMARKER

    if verify:
        checker.verify_all_nodes(self)
    return checker
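
The two assertions rest on the fact that ENDMARKER is always last and carries no source text, so no AST node can meaningfully end on it. A quick check:

import io
import token
import tokenize

src = "def f():\n    return 1\n"
last = list(tokenize.generate_tokens(io.StringIO(src).readline))[-1]
# ENDMARKER's string is always empty.
assert last.type == token.ENDMARKER and last.string == ""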
Example #9
Source File: py_utils.py From tranX with Apache License 2.0

def tokenize_code(code, mode=None):
    token_stream = generate_tokens(StringIO(code).readline)
    tokens = []
    for toknum, tokval, (srow, scol), (erow, ecol), _ in token_stream:
        if toknum == tk.ENDMARKER:
            break

        if mode == 'decoder':
            if toknum == tk.STRING:
                quote = tokval[0]
                tokval = tokval[1:-1]
                tokens.append(quote)
                tokens.append(tokval)
                tokens.append(quote)
            elif toknum == tk.DEDENT:
                continue
            else:
                tokens.append(tokval)
        elif mode == 'canonicalize':
            if toknum == tk.STRING:
                tokens.append('_STR_')
            elif toknum == tk.DEDENT:
                continue
            else:
                tokens.append(tokval)
        else:
            tokens.append(tokval)

    return tokens
Example #10
Source File: conala_eval.py From tranX with Apache License 2.0

def tokenize_code(string, concat_symbol=None):
    tokens = []
    string = string.strip().decode('utf-8').encode('ascii', 'strict')  # .decode('string_escape')
    for toknum, tokval, _, _, _ in tokenize.generate_tokens(StringIO(string).readline):
        # We ignore these tokens during evaluation.
        if toknum not in [token.ENDMARKER, token.INDENT, token.DEDENT]:
            tokens.append(tokval.lower())

    return tokens
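
Note that this is Python 2 code (a byte string is decoded and re-encoded before tokenizing). A rough Python 3 equivalent of the same filtering, dropping ENDMARKER, INDENT and DEDENT and lowercasing the rest, might look like the sketch below; tokenize_code_py3 is a hypothetical name, not part of tranX:

import io
import token
import tokenize

def tokenize_code_py3(string):
    skip = (token.ENDMARKER, token.INDENT, token.DEDENT)
    return [t.string.lower()
            for t in tokenize.generate_tokens(io.StringIO(string.strip()).readline)
            if t.type not in skip]

# The trailing '' is the synthetic NEWLINE for input without a final newline.
print(tokenize_code_py3("X = Foo(1)"))  # ['x', '=', 'foo', '(', '1', ')', '']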
Example #11
Source File: snippet.py From Jandroid with BSD 3-Clause "New" or "Revised" License

def _SnippetizeNode(node, tokens):
    # The parser module gives a syntax tree that discards comments,
    # non-terminating newlines, and whitespace information. Use the tokens given
    # by the tokenize module to annotate the syntax tree with the information
    # needed to exactly reproduce the original source code.
    node_type = node[0]

    if node_type >= token.NT_OFFSET:
        # Symbol.
        children = tuple(_SnippetizeNode(child, tokens) for child in node[1:])
        return Symbol(node_type, children)
    else:
        # Token.
        grabbed_tokens = []
        while tokens and (tokens[0].type == tokenize.COMMENT
                          or tokens[0].type == tokenize.NL):
            grabbed_tokens.append(tokens.popleft())

        # parser has 2 NEWLINEs right before the end.
        # tokenize has 0 or 1 depending on if the file has one.
        # Create extra nodes without consuming tokens to account for this.
        if node_type == token.NEWLINE:
            for tok in tokens:
                if tok.type == token.ENDMARKER:
                    return TokenSnippet(node_type, grabbed_tokens)
                if tok.type != token.DEDENT:
                    break

        assert tokens[0].type == token.OP or node_type == tokens[0].type

        grabbed_tokens.append(tokens.popleft())
        return TokenSnippet(node_type, grabbed_tokens)
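
The DEDENT-skipping probe near the end of this function relies on how a token stream finishes: any open indentation is closed by DEDENT tokens immediately before the final ENDMARKER. For example:

import io
import tokenize

names = [tokenize.tok_name[t.type]
         for t in tokenize.generate_tokens(io.StringIO("if x:\n    y\n").readline)]
print(names[-3:])  # ['NEWLINE', 'DEDENT', 'ENDMARKER']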
Example #12
Source File: transformer.py From oss-ftp with MIT License

def file_input(self, nodelist):
    doc = self.get_docstring(nodelist, symbol.file_input)
    if doc is not None:
        i = 1
    else:
        i = 0
    stmts = []
    for node in nodelist[i:]:
        if node[0] != token.ENDMARKER and node[0] != token.NEWLINE:
            self.com_append_stmt(stmts, node)
    return Module(doc, Stmt(stmts))
Example #13
Source File: transformer.py From BinderFilter with MIT License

def file_input(self, nodelist):
    doc = self.get_docstring(nodelist, symbol.file_input)
    if doc is not None:
        i = 1
    else:
        i = 0
    stmts = []
    for node in nodelist[i:]:
        if node[0] != token.ENDMARKER and node[0] != token.NEWLINE:
            self.com_append_stmt(stmts, node)
    return Module(doc, Stmt(stmts))
Example #14
Source File: transformer.py From ironpython2 with Apache License 2.0

def file_input(self, nodelist):
    doc = self.get_docstring(nodelist, symbol.file_input)
    if doc is not None:
        i = 1
    else:
        i = 0
    stmts = []
    for node in nodelist[i:]:
        if node[0] != token.ENDMARKER and node[0] != token.NEWLINE:
            self.com_append_stmt(stmts, node)
    return Module(doc, Stmt(stmts))
Example #15
Source File: doctest_driver.py From luscan-devel with GNU General Public License v2.0

def _simulate_compile_singlemode(self, s):
    # Calculate line offsets
    lines = [0, 0]
    pos = 0
    while 1:
        pos = s.find('\n', pos) + 1
        if not pos:
            break
        lines.append(pos)
    lines.append(len(s))

    oldpos = 0
    parenlevel = 0
    deflevel = 0
    output = []
    stmt = []

    text = StringIO(s)
    tok_gen = tokenize.generate_tokens(text.readline)
    for toktype, tok, (srow, scol), (erow, ecol), line in tok_gen:
        newpos = lines[srow] + scol
        stmt.append(s[oldpos:newpos])
        if tok != '':
            stmt.append(tok)
            oldpos = newpos + len(tok)

        # Update the paren level.
        if tok in '([{':
            parenlevel += 1
        if tok in '}])':
            parenlevel -= 1

        if tok in ('def', 'class') and deflevel == 0:
            deflevel = 1
        if deflevel and toktype == token.INDENT:
            deflevel += 1
        if deflevel and toktype == token.DEDENT:
            deflevel -= 1

        # Are we starting a statement?
        if ((toktype in (token.NEWLINE, tokenize.NL, tokenize.COMMENT,
                         token.INDENT, token.ENDMARKER) or tok == ':')
                and parenlevel == 0):
            if deflevel == 0 and self._is_expr(stmt[1:-2]):
                output += stmt[0]
                output.append('__print__((')
                output += stmt[1:-2]
                output.append('))')
                output += stmt[-2:]
            else:
                output += stmt
            stmt = []
    return ''.join(output)
Example #16
Source File: doctest_driver.py From razzy-spinner with GNU General Public License v3.0

def _simulate_compile_singlemode(self, s):
    # Calculate line offsets
    lines = [0, 0]
    pos = 0
    while 1:
        pos = s.find('\n', pos) + 1
        if not pos:
            break
        lines.append(pos)
    lines.append(len(s))

    oldpos = 0
    parenlevel = 0
    deflevel = 0
    output = []
    stmt = []

    text = StringIO(s)
    tok_gen = tokenize.generate_tokens(text.readline)
    for toktype, tok, (srow, scol), (erow, ecol), line in tok_gen:
        newpos = lines[srow] + scol
        stmt.append(s[oldpos:newpos])
        if tok != '':
            stmt.append(tok)
            oldpos = newpos + len(tok)

        # Update the paren level.
        if tok in '([{':
            parenlevel += 1
        if tok in '}])':
            parenlevel -= 1

        if tok in ('def', 'class') and deflevel == 0:
            deflevel = 1
        if deflevel and toktype == token.INDENT:
            deflevel += 1
        if deflevel and toktype == token.DEDENT:
            deflevel -= 1

        # Are we starting a statement?
        if ((toktype in (token.NEWLINE, tokenize.NL, tokenize.COMMENT,
                         token.INDENT, token.ENDMARKER) or tok == ':')
                and parenlevel == 0):
            if deflevel == 0 and self._is_expr(stmt[1:-2]):
                output += stmt[0]
                output.append('__print__((')
                output += stmt[1:-2]
                output.append('))')
                output += stmt[-2:]
            else:
                output += stmt
            stmt = []
    return ''.join(output)
Example #17
Source File: stdlib_error_helpers.py From thonny with MIT License

def __init__(self, error_info):
    import tokenize

    super().__init__(error_info)

    self.tokens = []
    self.token_error = None

    if self.error_info["message"] == "EOL while scanning string literal":
        self.intro_text = (
            "You haven't properly closed the string on line %s." % self.error_info["lineno"]
            + "\n(If you want a multi-line string, then surround it with"
            + " `'''` or `\"\"\"` at both ends.)"
        )
    elif self.error_info["message"] == "EOF while scanning triple-quoted string literal":
        # lineno is not useful, as it is at the end of the file and user probably
        # didn't want the string to end there
        self.intro_text = "You haven't properly closed a triple-quoted string"
    else:
        if self.error_info["filename"] and os.path.isfile(self.error_info["filename"]):
            with open(self.error_info["filename"], mode="rb") as fp:
                try:
                    for t in tokenize.tokenize(fp.readline):
                        self.tokens.append(t)
                except tokenize.TokenError as e:
                    self.token_error = e
                except IndentationError as e:
                    self.indentation_error = e

            if not self.tokens or self.tokens[-1].type not in [
                token.ERRORTOKEN,
                token.ENDMARKER,
            ]:
                self.tokens.append(tokenize.TokenInfo(token.ERRORTOKEN, "", None, None, ""))
        else:
            self.tokens = []

        unbalanced = self._sug_unbalanced_parens()
        if unbalanced:
            self.intro_text = (
                "Unbalanced parentheses, brackets or braces:\n\n" + unbalanced.body
            )
            self.intro_confidence = 5
        else:
            self.intro_text = "Python doesn't know how to read your program."
            if "^" in str(self.error_info):
                self.intro_text += (
                    "\n\nSmall `^` in the original error message shows where it gave up,"
                    + " but the actual mistake can be before this."
                )

    self.suggestions = [self._sug_missing_or_misplaced_colon()]
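
The "last token is ERRORTOKEN or ENDMARKER" check exists because a stream that fails mid-tokenization never reaches ENDMARKER: on unbalanced brackets, for instance, the tokenizer raises TokenError instead of finishing. A small demonstration:

import io
import tokenize

try:
    # The open '(' is never closed, so no ENDMARKER is ever emitted.
    list(tokenize.generate_tokens(io.StringIO("f(1, 2\n").readline))
except tokenize.TokenError as exc:
    print(exc)  # EOF in multi-line statement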
Example #18
Source File: stencil.py From stencil with MIT License

def _parse(self):
    tok = self.current
    if tok.exact_type in (token.ENDMARKER, token.COMMA):
        return  # TODO
    if tok.exact_type == token.STRING:
        self.next()
        return AstLiteral(tok.string[1:-1])
    if tok.exact_type == token.NUMBER:
        self.next()
        try:
            value = int(tok.string)
        except ValueError:
            value = float(tok.string)
        return AstLiteral(value)
    if tok.exact_type == token.NAME:
        state = AstContext(tok.string)
        while True:
            tok = self.next()
            if tok.exact_type == token.DOT:
                tok = self.next()
                if tok.exact_type != token.NAME:
                    raise SyntaxError(f"Invalid attr lookup: {tok}")
                state = AstAttr(state, tok.string)
            elif tok.exact_type == token.LSQB:
                self.next()
                right = self._parse()
                state = AstLookup(state, right)
                if self.current.exact_type != token.RSQB:
                    raise SyntaxError(f"Expected ] but found {self.current}")
            elif tok.exact_type == token.LPAR:
                state = AstCall(state)
                self.next()
                while self.current.exact_type != token.RPAR:
                    arg = self._parse()
                    state.add_arg(arg)
                    if self.current.exact_type != token.COMMA:
                        break
                    self.next()
                if self.current.exact_type != token.RPAR:
                    raise SyntaxError(f"Expected ) but found {self.current}")
                self.next()
            else:
                break
        return state
    raise SyntaxError(
        f"Error parsing expression {tok.line!r}: Unexpected token {tok.string!r} at position {tok.start[0]}."
    )