Python token.DEDENT Examples
The following are 15 code examples of token.DEDENT. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module token, or try the search function.
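Before the examples, here is a minimal stand-alone sketch (not taken from any of the projects below) showing where DEDENT appears in a token stream: the tokenizer emits one DEDENT for every INDENT when a block closes.

import io
import token
import tokenize

# One indented block, so the stream contains exactly one INDENT/DEDENT pair.
source = "def greet():\n    return 'hi'\n"

for tok in tokenize.generate_tokens(io.StringIO(source).readline):
    # token.DEDENT is an integer constant; token.tok_name maps it back to "DEDENT".
    print(token.tok_name[tok.type], repr(tok.string))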
Example #1
Source File: pygettext.py From oss-ftp with MIT License | 6 votes |
def __openseen(self, ttype, tstring, lineno):
    if ttype == tokenize.OP and tstring == ')':
        # We've seen the last of the translatable strings. Record the
        # line number of the first line of the strings and update the list
        # of messages seen. Reset state for the next batch. If there
        # were no strings inside _(), then just ignore this entry.
        if self.__data:
            self.__addentry(EMPTYSTRING.join(self.__data))
        self.__state = self.__waiting
    elif ttype == tokenize.STRING:
        self.__data.append(safe_eval(tstring))
    elif ttype not in [tokenize.COMMENT, token.INDENT, token.DEDENT,
                       token.NEWLINE, tokenize.NL]:
        # warn if we see anything else than STRING or whitespace
        print >> sys.stderr, _(
            '*** %(file)s:%(lineno)s: Seen unexpected token "%(token)s"'
            ) % {
            'token': tstring,
            'file': self.__curfile,
            'lineno': self.__lineno
            }
        self.__state = self.__waiting
Example #2
Source File: pygettext.py From datafari with Apache License 2.0 | 6 votes |
def __openseen(self, ttype, tstring, lineno):
    if ttype == tokenize.OP and tstring == ')':
        # We've seen the last of the translatable strings. Record the
        # line number of the first line of the strings and update the list
        # of messages seen. Reset state for the next batch. If there
        # were no strings inside _(), then just ignore this entry.
        if self.__data:
            self.__addentry(EMPTYSTRING.join(self.__data))
        self.__state = self.__waiting
    elif ttype == tokenize.STRING:
        self.__data.append(safe_eval(tstring))
    elif ttype not in [tokenize.COMMENT, token.INDENT, token.DEDENT,
                       token.NEWLINE, tokenize.NL]:
        # warn if we see anything else than STRING or whitespace
        print >> sys.stderr, _(
            '*** %(file)s:%(lineno)s: Seen unexpected token "%(token)s"'
            ) % {
            'token': tstring,
            'file': self.__curfile,
            'lineno': self.__lineno
            }
        self.__state = self.__waiting
Example #3
Source File: pygettext.py From HRTunerProxy with GNU General Public License v2.0 | 6 votes |
def __openseen(self, ttype, tstring, lineno):
    if ttype == tokenize.OP and tstring == ')':
        # We've seen the last of the translatable strings. Record the
        # line number of the first line of the strings and update the list
        # of messages seen. Reset state for the next batch. If there
        # were no strings inside _(), then just ignore this entry.
        if self.__data:
            self.__addentry(EMPTYSTRING.join(self.__data))
        self.__state = self.__waiting
    elif ttype == tokenize.STRING:
        self.__data.append(safe_eval(tstring))
    elif ttype not in [tokenize.COMMENT, token.INDENT, token.DEDENT,
                       token.NEWLINE, tokenize.NL]:
        # warn if we see anything else than STRING or whitespace
        print >> sys.stderr, _(
            '*** %(file)s:%(lineno)s: Seen unexpected token "%(token)s"'
            ) % {
            'token': tstring,
            'file': self.__curfile,
            'lineno': self.__lineno
            }
        self.__state = self.__waiting
Example #4
Source File: pygettext.py From odoo13-x64 with GNU General Public License v3.0 | 6 votes |
def __openseen(self, ttype, tstring, lineno):
    if ttype == tokenize.OP and tstring == ')':
        # We've seen the last of the translatable strings. Record the
        # line number of the first line of the strings and update the list
        # of messages seen. Reset state for the next batch. If there
        # were no strings inside _(), then just ignore this entry.
        if self.__data:
            self.__addentry(EMPTYSTRING.join(self.__data))
        self.__state = self.__waiting
    elif ttype == tokenize.STRING and is_literal_string(tstring):
        self.__data.append(safe_eval(tstring))
    elif ttype not in [tokenize.COMMENT, token.INDENT, token.DEDENT,
                       token.NEWLINE, tokenize.NL]:
        # warn if we see anything else than STRING or whitespace
        print(_(
            '*** %(file)s:%(lineno)s: Seen unexpected token "%(token)s"'
            ) % {
            'token': tstring,
            'file': self.__curfile,
            'lineno': self.__lineno
            }, file=sys.stderr)
        self.__state = self.__waiting
Example #5
Source File: pygettext.py From android_universal with MIT License | 6 votes |
def __openseen(self, ttype, tstring, lineno):
    if ttype == tokenize.OP and tstring == ')':
        # We've seen the last of the translatable strings. Record the
        # line number of the first line of the strings and update the list
        # of messages seen. Reset state for the next batch. If there
        # were no strings inside _(), then just ignore this entry.
        if self.__data:
            self.__addentry(EMPTYSTRING.join(self.__data))
        self.__state = self.__waiting
    elif ttype == tokenize.STRING and is_literal_string(tstring):
        self.__data.append(safe_eval(tstring))
    elif ttype not in [tokenize.COMMENT, token.INDENT, token.DEDENT,
                       token.NEWLINE, tokenize.NL]:
        # warn if we see anything else than STRING or whitespace
        print(_(
            '*** %(file)s:%(lineno)s: Seen unexpected token "%(token)s"'
            ) % {
            'token': tstring,
            'file': self.__curfile,
            'lineno': self.__lineno
            }, file=sys.stderr)
        self.__state = self.__waiting
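All five pygettext variants above whitelist token.INDENT and token.DEDENT (along with comments and newline tokens) so that whitespace between the strings of a _() call does not trigger the "unexpected token" warning. Below is a crude, self-contained sketch of the same idea, not pygettext itself; the function name, the bail-out rule, and the use of ast.literal_eval in place of safe_eval are assumptions of mine.

import ast
import io
import token
import tokenize

def extract_messages(source):
    """Crude sketch of the state machine above: collect string literals
    between _( and ), ignoring comment and whitespace tokens."""
    messages, data, in_call = [], [], False
    for ttype, tstring, _, _, _ in tokenize.generate_tokens(io.StringIO(source).readline):
        if not in_call:
            if ttype == token.NAME and tstring == '_':
                in_call = True                       # crude: treat the next (...) as the call
        elif ttype == tokenize.OP and tstring == ')':
            if data:
                messages.append(''.join(data))
            data, in_call = [], False
        elif ttype == tokenize.STRING:
            data.append(ast.literal_eval(tstring))   # stand-in for pygettext's safe_eval
        elif ttype not in (tokenize.COMMENT, token.INDENT, token.DEDENT,
                           token.NEWLINE, tokenize.NL, tokenize.OP):
            in_call = False                          # anything unexpected: give up on this call
    return messages

print(extract_messages("x = _('hello '\n      'world')\n"))   # ['hello world']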
Example #6
Source File: PyColorize.py From Computable with MIT License | 5 votes |
def __call__(self, toktype, toktext, start_pos, end_pos, line):
    """ Token handler, with syntax highlighting."""
    (srow, scol) = start_pos
    (erow, ecol) = end_pos
    colors = self.colors
    owrite = self.out.write

    # line separator, so this works across platforms
    linesep = os.linesep

    # calculate new positions
    oldpos = self.pos
    newpos = self.lines[srow] + scol
    self.pos = newpos + len(toktext)

    # send the original whitespace, if needed
    if newpos > oldpos:
        owrite(self.raw[oldpos:newpos])

    # skip indenting tokens
    if toktype in [token.INDENT, token.DEDENT]:
        self.pos = newpos
        return

    # map token type to a color group
    if token.LPAR <= toktype and toktype <= token.OP:
        toktype = token.OP
    elif toktype == token.NAME and keyword.iskeyword(toktext):
        toktype = _KEYWORD
    color = colors.get(toktype, colors[_TEXT])

    #print '<%s>' % toktext,    # dbg

    # Triple quoted strings must be handled carefully so that backtracking
    # in pagers works correctly. We need color terminators on _each_ line.
    if linesep in toktext:
        toktext = toktext.replace(linesep,
                                  '%s%s%s' % (colors.normal, linesep, color))

    # send text
    owrite('%s%s%s' % (color, toktext, colors.normal))
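The colorizer skips INDENT and DEDENT and re-emits the original whitespace from the raw source instead. A quick stand-alone check (independent of PyColorize) shows why that loses nothing: INDENT's token text is just the indentation string, and DEDENT carries no text at all.

import io
import token
import tokenize

source = "if True:\n    x = 1\ny = 2\n"

for tok in tokenize.generate_tokens(io.StringIO(source).readline):
    if tok.type in (token.INDENT, token.DEDENT):
        # INDENT's string is the indentation itself; DEDENT's string is empty
        # and its start equals its end, so skipping it drops no source text.
        print(token.tok_name[tok.type], repr(tok.string), tok.start, tok.end)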
Example #7
Source File: snippet.py From Jandroid with BSD 3-Clause "New" or "Revised" License | 5 votes |
def _SnippetizeNode(node, tokens):
    # The parser module gives a syntax tree that discards comments,
    # non-terminating newlines, and whitespace information. Use the tokens given
    # by the tokenize module to annotate the syntax tree with the information
    # needed to exactly reproduce the original source code.
    node_type = node[0]

    if node_type >= token.NT_OFFSET:
        # Symbol.
        children = tuple(_SnippetizeNode(child, tokens) for child in node[1:])
        return Symbol(node_type, children)
    else:
        # Token.
        grabbed_tokens = []
        while tokens and (tokens[0].type == tokenize.COMMENT or
                          tokens[0].type == tokenize.NL):
            grabbed_tokens.append(tokens.popleft())

        # parser has 2 NEWLINEs right before the end.
        # tokenize has 0 or 1 depending on if the file has one.
        # Create extra nodes without consuming tokens to account for this.
        if node_type == token.NEWLINE:
            for tok in tokens:
                if tok.type == token.ENDMARKER:
                    return TokenSnippet(node_type, grabbed_tokens)
                if tok.type != token.DEDENT:
                    break

        assert tokens[0].type == token.OP or node_type == tokens[0].type
        grabbed_tokens.append(tokens.popleft())
        return TokenSnippet(node_type, grabbed_tokens)
Example #8
Source File: snippet.py From Jandroid with BSD 3-Clause "New" or "Revised" License | 5 votes |
def _SnippetizeNode(node, tokens):
    # The parser module gives a syntax tree that discards comments,
    # non-terminating newlines, and whitespace information. Use the tokens given
    # by the tokenize module to annotate the syntax tree with the information
    # needed to exactly reproduce the original source code.
    node_type = node[0]

    if node_type >= token.NT_OFFSET:
        # Symbol.
        children = tuple(_SnippetizeNode(child, tokens) for child in node[1:])
        return Symbol(node_type, children)
    else:
        # Token.
        grabbed_tokens = []
        while tokens and (tokens[0].type == tokenize.COMMENT or
                          tokens[0].type == tokenize.NL):
            grabbed_tokens.append(tokens.popleft())

        # parser has 2 NEWLINEs right before the end.
        # tokenize has 0 or 1 depending on if the file has one.
        # Create extra nodes without consuming tokens to account for this.
        if node_type == token.NEWLINE:
            for tok in tokens:
                if tok.type == token.ENDMARKER:
                    return TokenSnippet(node_type, grabbed_tokens)
                if tok.type != token.DEDENT:
                    break

        assert tokens[0].type == token.OP or node_type == tokens[0].type
        grabbed_tokens.append(tokens.popleft())
        return TokenSnippet(node_type, grabbed_tokens)
Example #9
Source File: snippet.py From Jandroid with BSD 3-Clause "New" or "Revised" License | 5 votes |
def _SnippetizeNode(node, tokens):
    # The parser module gives a syntax tree that discards comments,
    # non-terminating newlines, and whitespace information. Use the tokens given
    # by the tokenize module to annotate the syntax tree with the information
    # needed to exactly reproduce the original source code.
    node_type = node[0]

    if node_type >= token.NT_OFFSET:
        # Symbol.
        children = tuple(_SnippetizeNode(child, tokens) for child in node[1:])
        return Symbol(node_type, children)
    else:
        # Token.
        grabbed_tokens = []
        while tokens and (tokens[0].type == tokenize.COMMENT or
                          tokens[0].type == tokenize.NL):
            grabbed_tokens.append(tokens.popleft())

        # parser has 2 NEWLINEs right before the end.
        # tokenize has 0 or 1 depending on if the file has one.
        # Create extra nodes without consuming tokens to account for this.
        if node_type == token.NEWLINE:
            for tok in tokens:
                if tok.type == token.ENDMARKER:
                    return TokenSnippet(node_type, grabbed_tokens)
                if tok.type != token.DEDENT:
                    break

        assert tokens[0].type == token.OP or node_type == tokens[0].type
        grabbed_tokens.append(tokens.popleft())
        return TokenSnippet(node_type, grabbed_tokens)
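The NEWLINE special case in _SnippetizeNode deals with the mismatch its comment describes: the parser's tree ends with two NEWLINE nodes, while tokenize emits at most one, followed by any pending DEDENT tokens and then ENDMARKER. A small stand-alone check of that tail behaviour:

import io
import token
import tokenize

# Pending DEDENTs are flushed right before ENDMARKER at the end of input.
source = "def f():\n    if True:\n        pass\n"

names = [token.tok_name[t.type]
         for t in tokenize.generate_tokens(io.StringIO(source).readline)]
print(names[-4:])   # ['NEWLINE', 'DEDENT', 'DEDENT', 'ENDMARKER']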
Example #10
Source File: conala_eval.py From tranX with Apache License 2.0 | 5 votes |
def tokenize_code(string, concat_symbol=None):
    tokens = []
    string = string.strip().decode('utf-8').encode('ascii', 'strict')  # .decode('string_escape')
    for toknum, tokval, _, _, _ in tokenize.generate_tokens(StringIO(string).readline):
        # We ignore these tokens during evaluation.
        if toknum not in [token.ENDMARKER, token.INDENT, token.DEDENT]:
            tokens.append(tokval.lower())

    return tokens
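The decode('utf-8').encode('ascii', 'strict') round-trip marks this as Python 2 code. Here is a rough Python 3 sketch of the same ENDMARKER/INDENT/DEDENT filtering, for reference; the function name is mine, not from the project.

import io
import token
import tokenize

def tokenize_code_py3(string):
    """Lower-cased token values, skipping ENDMARKER, INDENT and DEDENT."""
    tokens = []
    for toknum, tokval, _, _, _ in tokenize.generate_tokens(io.StringIO(string).readline):
        # Structural tokens carry no surface text worth comparing, so drop them.
        if toknum not in (token.ENDMARKER, token.INDENT, token.DEDENT):
            tokens.append(tokval.lower())
    return tokens

print(tokenize_code_py3("if x > 0:\n    y = F(x)\n"))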
Example #11
Source File: py_utils.py From tranX with Apache License 2.0 | 5 votes |
def tokenize_code(code, mode=None):
    token_stream = generate_tokens(StringIO(code).readline)
    tokens = []
    for toknum, tokval, (srow, scol), (erow, ecol), _ in token_stream:
        if toknum == tk.ENDMARKER:
            break

        if mode == 'decoder':
            if toknum == tk.STRING:
                quote = tokval[0]
                tokval = tokval[1:-1]
                tokens.append(quote)
                tokens.append(tokval)
                tokens.append(quote)
            elif toknum == tk.DEDENT:
                continue
            else:
                tokens.append(tokval)
        elif mode == 'canonicalize':
            if toknum == tk.STRING:
                tokens.append('_STR_')
            elif toknum == tk.DEDENT:
                continue
            else:
                tokens.append(tokval)
        else:
            tokens.append(tokval)

    return tokens
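A hypothetical call of the function above, assuming the module imports generate_tokens from tokenize, StringIO from io, and tokenize as tk (the snippet itself does not show its imports):

code = "name = 'Ada'\n"

print(tokenize_code(code))                       # e.g. ['name', '=', "'Ada'", '\n']
print(tokenize_code(code, mode='decoder'))       # string split into quote, body, quote
print(tokenize_code(code, mode='canonicalize'))  # string literal replaced by '_STR_'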
Example #12
Source File: doctest_driver.py From razzy-spinner with GNU General Public License v3.0 | 4 votes |
def _simulate_compile_singlemode(self, s):
    # Calculate line offsets
    lines = [0, 0]
    pos = 0
    while 1:
        pos = s.find('\n', pos) + 1
        if not pos:
            break
        lines.append(pos)
    lines.append(len(s))

    oldpos = 0
    parenlevel = 0
    deflevel = 0
    output = []
    stmt = []
    text = StringIO(s)
    tok_gen = tokenize.generate_tokens(text.readline)
    for toktype, tok, (srow, scol), (erow, ecol), line in tok_gen:
        newpos = lines[srow] + scol
        stmt.append(s[oldpos:newpos])
        if tok != '':
            stmt.append(tok)
        oldpos = newpos + len(tok)

        # Update the paren level.
        if tok in '([{':
            parenlevel += 1
        if tok in '}])':
            parenlevel -= 1

        if tok in ('def', 'class') and deflevel == 0:
            deflevel = 1
        if deflevel and toktype == token.INDENT:
            deflevel += 1
        if deflevel and toktype == token.DEDENT:
            deflevel -= 1

        # Are we starting a statement?
        if ((toktype in (token.NEWLINE, tokenize.NL, tokenize.COMMENT,
                         token.INDENT, token.ENDMARKER) or
             tok == ':') and parenlevel == 0):
            if deflevel == 0 and self._is_expr(stmt[1:-2]):
                output += stmt[0]
                output.append('__print__((')
                output += stmt[1:-2]
                output.append('))')
                output += stmt[-2:]
            else:
                output += stmt
            stmt = []
    return ''.join(output)
Example #13
Source File: phystokens.py From coveragepy-bbmirror with Apache License 2.0 | 4 votes |
def source_token_lines(source):
    """Generate a series of lines, one for each line in `source`.

    Each line is a list of pairs, each pair is a token::

        [('key', 'def'), ('ws', ' '), ('nam', 'hello'), ('op', '('), ... ]

    Each pair has a token class, and the token text.

    If you concatenate all the token texts, and then join them with newlines,
    you should have your original `source` back, with two differences:
    trailing whitespace is not preserved, and a final line with no newline
    is indistinguishable from a final line with a newline.

    """
    ws_tokens = set([token.INDENT, token.DEDENT, token.NEWLINE, tokenize.NL])
    line = []
    col = 0

    source = source.expandtabs(8).replace('\r\n', '\n')
    tokgen = generate_tokens(source)

    for ttype, ttext, (_, scol), (_, ecol), _ in phys_tokens(tokgen):
        mark_start = True
        for part in re.split('(\n)', ttext):
            if part == '\n':
                yield line
                line = []
                col = 0
                mark_end = False
            elif part == '':
                mark_end = False
            elif ttype in ws_tokens:
                mark_end = False
            else:
                if mark_start and scol > col:
                    line.append(("ws", u" " * (scol - col)))
                    mark_start = False
                tok_class = tokenize.tok_name.get(ttype, 'xx').lower()[:3]
                if ttype == token.NAME and keyword.iskeyword(ttext):
                    tok_class = "key"
                line.append((tok_class, part))
                mark_end = True
            scol = 0
        if mark_end:
            col = ecol

    if line:
        yield line
Example #14
Source File: phystokens.py From coveragepy with Apache License 2.0 | 4 votes |
def source_token_lines(source):
    """Generate a series of lines, one for each line in `source`.

    Each line is a list of pairs, each pair is a token::

        [('key', 'def'), ('ws', ' '), ('nam', 'hello'), ('op', '('), ... ]

    Each pair has a token class, and the token text.

    If you concatenate all the token texts, and then join them with newlines,
    you should have your original `source` back, with two differences:
    trailing whitespace is not preserved, and a final line with no newline
    is indistinguishable from a final line with a newline.

    """
    ws_tokens = set([token.INDENT, token.DEDENT, token.NEWLINE, tokenize.NL])
    line = []
    col = 0

    source = source.expandtabs(8).replace('\r\n', '\n')
    tokgen = generate_tokens(source)

    for ttype, ttext, (_, scol), (_, ecol), _ in phys_tokens(tokgen):
        mark_start = True
        for part in re.split('(\n)', ttext):
            if part == '\n':
                yield line
                line = []
                col = 0
                mark_end = False
            elif part == '':
                mark_end = False
            elif ttype in ws_tokens:
                mark_end = False
            else:
                if mark_start and scol > col:
                    line.append(("ws", u" " * (scol - col)))
                    mark_start = False
                tok_class = tokenize.tok_name.get(ttype, 'xx').lower()[:3]
                if ttype == token.NAME and keyword.iskeyword(ttext):
                    tok_class = "key"
                line.append((tok_class, part))
                mark_end = True
            scol = 0
        if mark_end:
            col = ecol

    if line:
        yield line
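A hypothetical use of source_token_lines; it relies on the phys_tokens and generate_tokens helpers from the same coverage.py module, so it only runs in that context. INDENT, DEDENT, NEWLINE and NL are treated as whitespace, which is why only 'ws' pairs appear where indentation was.

source = "def hello():\n    return 42\n"

for line in source_token_lines(source):
    print(line)
# Roughly:
#   [('key', 'def'), ('ws', ' '), ('nam', 'hello'), ('op', '('), ('op', ')'), ('op', ':')]
#   [('ws', '    '), ('key', 'return'), ('ws', ' '), ('num', '42')]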
Example #15
Source File: doctest_driver.py From luscan-devel with GNU General Public License v2.0 | 4 votes |
def _simulate_compile_singlemode(self, s):
    # Calculate line offsets
    lines = [0, 0]
    pos = 0
    while 1:
        pos = s.find('\n', pos) + 1
        if not pos:
            break
        lines.append(pos)
    lines.append(len(s))

    oldpos = 0
    parenlevel = 0
    deflevel = 0
    output = []
    stmt = []
    text = StringIO(s)
    tok_gen = tokenize.generate_tokens(text.readline)
    for toktype, tok, (srow, scol), (erow, ecol), line in tok_gen:
        newpos = lines[srow] + scol
        stmt.append(s[oldpos:newpos])
        if tok != '':
            stmt.append(tok)
        oldpos = newpos + len(tok)

        # Update the paren level.
        if tok in '([{':
            parenlevel += 1
        if tok in '}])':
            parenlevel -= 1

        if tok in ('def', 'class') and deflevel == 0:
            deflevel = 1
        if deflevel and toktype == token.INDENT:
            deflevel += 1
        if deflevel and toktype == token.DEDENT:
            deflevel -= 1

        # Are we starting a statement?
        if ((toktype in (token.NEWLINE, tokenize.NL, tokenize.COMMENT,
                         token.INDENT, token.ENDMARKER) or
             tok == ':') and parenlevel == 0):
            if deflevel == 0 and self._is_expr(stmt[1:-2]):
                output += stmt[0]
                output.append('__print__((')
                output += stmt[1:-2]
                output.append('))')
                output += stmt[-2:]
            else:
                output += stmt
            stmt = []
    return ''.join(output)
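The deflevel bookkeeping above leans on the fact that INDENT and DEDENT always come in matched pairs, even when several blocks close at once at the end of the input. A stand-alone sketch (not taken from doctest_driver) of that balancing property:

import io
import token
import tokenize

source = "def f():\n    if True:\n        pass\nx = 1\n"

depth = 0
for tok in tokenize.generate_tokens(io.StringIO(source).readline):
    if tok.type == token.INDENT:
        depth += 1
    elif tok.type == token.DEDENT:
        depth -= 1
print("final depth:", depth)   # 0: every INDENT is matched by a DEDENT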