Python tokenize.NL Examples
The following are 30 code examples of tokenize.NL, the token type the tokenize module emits for non-logical newlines. You can go to the original project or source file by following the links above each example. You may also want to check out all available functions and classes of the tokenize module.
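Before the examples, here is a minimal, self-contained sketch (not taken from any of the projects below; the source string is made up for illustration) of where tokenize.NL appears in a token stream. The standard-library tokenizer emits NL for line breaks that do not end a logical line, such as blank lines and breaks inside brackets, while NEWLINE terminates a logical statement.

import io
import tokenize

# Hypothetical input: a blank line and line breaks inside brackets produce NL,
# while the ends of the two statements produce NEWLINE.
source = (
    "x = 1\n"
    "\n"
    "y = [\n"
    "    2,\n"
    "]\n"
)

for tok in tokenize.generate_tokens(io.StringIO(source).readline):
    if tok.type in (tokenize.NL, tokenize.NEWLINE):
        # tok_name maps the numeric token type back to its name
        print(tokenize.tok_name[tok.type], repr(tok.string), tok.start)

Running this prints one line per NL/NEWLINE token with its position, which makes the NL-versus-NEWLINE distinction in the examples below easier to follow.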
Example #1
Source File: format.py From python-netsurv with MIT License
def _has_valid_type_annotation(self, tokens, i):
    """Extended check of PEP-484 type hint presence"""
    if not self._inside_brackets("("):
        return False
    # token_info
    # type string start end line
    #  0     1      2    3    4
    bracket_level = 0
    for token in tokens[i - 1 :: -1]:
        if token[1] == ":":
            return True
        if token[1] == "(":
            return False
        if token[1] == "]":
            bracket_level += 1
        elif token[1] == "[":
            bracket_level -= 1
        elif token[1] == ",":
            if not bracket_level:
                return False
        elif token[1] in (".", "..."):
            continue
        elif token[0] not in (tokenize.NAME, tokenize.STRING, tokenize.NL):
            return False
    return False
Example #2
Source File: strings.py From python-netsurv with MIT License
def process_tokens(self, tokens):
    encoding = "ascii"
    for i, (tok_type, token, start, _, line) in enumerate(tokens):
        if tok_type == tokenize.ENCODING:
            # this is always the first token processed
            encoding = token
        elif tok_type == tokenize.STRING:
            # 'token' is the whole un-parsed token; we can look at the start
            # of it to see whether it's a raw or unicode string etc.
            self.process_string_token(token, start[0])
            # We figure the next token, ignoring comments & newlines:
            j = i + 1
            while j < len(tokens) and tokens[j].type in (
                tokenize.NEWLINE,
                tokenize.NL,
                tokenize.COMMENT,
            ):
                j += 1
            next_token = tokens[j] if j < len(tokens) else None
            if encoding != "ascii":
                # We convert `tokenize` character count into a byte count,
                # to match with astroid `.col_offset`
                start = (start[0], len(line[: start[1]].encode(encoding)))
            self.string_tokens[start] = (str_eval(token), next_token)
Example #3
Source File: format.py From python-netsurv with MIT License
def _has_valid_type_annotation(self, tokens, i):
    """Extended check of PEP-484 type hint presence"""
    if not self._inside_brackets("("):
        return False
    # token_info
    # type string start end line
    #  0     1      2    3    4
    bracket_level = 0
    for token in tokens[i - 1 :: -1]:
        if token[1] == ":":
            return True
        if token[1] == "(":
            return False
        if token[1] == "]":
            bracket_level += 1
        elif token[1] == "[":
            bracket_level -= 1
        elif token[1] == ",":
            if not bracket_level:
                return False
        elif token[1] in (".", "..."):
            continue
        elif token[0] not in (tokenize.NAME, tokenize.STRING, tokenize.NL):
            return False
    return False
Example #4
Source File: minification.py From shellsploit-library with MIT License
def remove_docstrings(tokens):
    """
    Removes docstrings from *tokens* which is expected to be a list
    equivalent of `tokenize.generate_tokens()` (so we can update in-place).
    """
    prev_tok_type = None
    for index, tok in enumerate(tokens):
        token_type = tok[0]
        if token_type == tokenize.STRING:
            if prev_tok_type == tokenize.INDENT:
                # Definitely a docstring
                tokens[index][1] = ''  # Remove it
                # Remove the leftover indentation and newline:
                tokens[index - 1][1] = ''
                tokens[index - 2][1] = ''
            elif prev_tok_type == tokenize.NL:
                # This captures whole-module docstrings:
                if tokens[index + 1][0] == tokenize.NEWLINE:
                    tokens[index][1] = ''
                    # Remove the trailing newline:
                    tokens[index + 1][1] = ''
        prev_tok_type = token_type
Example #5
Source File: autopep8.py From python-netsurv with MIT License
def _find_logical(source_lines):
    # Make a variable which is the index of all the starts of lines.
    logical_start = []
    logical_end = []
    last_newline = True
    parens = 0
    for t in generate_tokens(''.join(source_lines)):
        if t[0] in [tokenize.COMMENT, tokenize.DEDENT,
                    tokenize.INDENT, tokenize.NL,
                    tokenize.ENDMARKER]:
            continue
        if not parens and t[0] in [tokenize.NEWLINE, tokenize.SEMI]:
            last_newline = True
            logical_end.append((t[3][0] - 1, t[2][1]))
            continue
        if last_newline and not parens:
            logical_start.append((t[2][0] - 1, t[2][1]))
            last_newline = False
        if t[0] == tokenize.OP:
            if t[1] in '([{':
                parens += 1
            elif t[1] in '}])':
                parens -= 1
    return (logical_start, logical_end)
Example #6
Source File: autopep8.py From PyDev.Debugger with Eclipse Public License 1.0
def _find_logical(source_lines):
    # Make a variable which is the index of all the starts of lines.
    logical_start = []
    logical_end = []
    last_newline = True
    parens = 0
    for t in generate_tokens(''.join(source_lines)):
        if t[0] in [tokenize.COMMENT, tokenize.DEDENT,
                    tokenize.INDENT, tokenize.NL,
                    tokenize.ENDMARKER]:
            continue
        if not parens and t[0] in [tokenize.NEWLINE, tokenize.SEMI]:
            last_newline = True
            logical_end.append((t[3][0] - 1, t[2][1]))
            continue
        if last_newline and not parens:
            logical_start.append((t[2][0] - 1, t[2][1]))
            last_newline = False
        if t[0] == tokenize.OP:
            if t[1] in '([{':
                parens += 1
            elif t[1] in '}])':
                parens -= 1
    return (logical_start, logical_end)
Example #7
Source File: strings.py From python-netsurv with MIT License
def process_tokens(self, tokens):
    encoding = "ascii"
    for i, (tok_type, token, start, _, line) in enumerate(tokens):
        if tok_type == tokenize.ENCODING:
            # this is always the first token processed
            encoding = token
        elif tok_type == tokenize.STRING:
            # 'token' is the whole un-parsed token; we can look at the start
            # of it to see whether it's a raw or unicode string etc.
            self.process_string_token(token, start[0])
            # We figure the next token, ignoring comments & newlines:
            j = i + 1
            while j < len(tokens) and tokens[j].type in (
                tokenize.NEWLINE,
                tokenize.NL,
                tokenize.COMMENT,
            ):
                j += 1
            next_token = tokens[j] if j < len(tokens) else None
            if encoding != "ascii":
                # We convert `tokenize` character count into a byte count,
                # to match with astroid `.col_offset`
                start = (start[0], len(line[: start[1]].encode(encoding)))
            self.string_tokens[start] = (str_eval(token), next_token)
Example #8
Source File: pygettext.py From oss-ftp with MIT License
def __openseen(self, ttype, tstring, lineno):
    if ttype == tokenize.OP and tstring == ')':
        # We've seen the last of the translatable strings.  Record the
        # line number of the first line of the strings and update the list
        # of messages seen.  Reset state for the next batch.  If there
        # were no strings inside _(), then just ignore this entry.
        if self.__data:
            self.__addentry(EMPTYSTRING.join(self.__data))
        self.__state = self.__waiting
    elif ttype == tokenize.STRING:
        self.__data.append(safe_eval(tstring))
    elif ttype not in [tokenize.COMMENT, token.INDENT, token.DEDENT,
                       token.NEWLINE, tokenize.NL]:
        # warn if we see anything else than STRING or whitespace
        print >> sys.stderr, _(
            '*** %(file)s:%(lineno)s: Seen unexpected token "%(token)s"'
            ) % {
            'token': tstring,
            'file': self.__curfile,
            'lineno': self.__lineno
            }
        self.__state = self.__waiting
Example #9
Source File: pygettext.py From oss-ftp with MIT License
def __waiting(self, ttype, tstring, lineno):
    opts = self.__options
    # Do docstring extractions, if enabled
    if opts.docstrings and not opts.nodocstrings.get(self.__curfile):
        # module docstring?
        if self.__freshmodule:
            if ttype == tokenize.STRING:
                self.__addentry(safe_eval(tstring), lineno, isdocstring=1)
                self.__freshmodule = 0
            elif ttype not in (tokenize.COMMENT, tokenize.NL):
                self.__freshmodule = 0
            return
        # class docstring?
        if ttype == tokenize.NAME and tstring in ('class', 'def'):
            self.__state = self.__suiteseen
            return
    if ttype == tokenize.NAME and tstring in opts.keywords:
        self.__state = self.__keywordseen
Example #10
Source File: test_asttokens.py From asttokens with Apache License 2.0
def test_tokenizing(self):
    # Test that we produce meaningful tokens on initialization.
    source = "import re  # comment\n\nfoo = 'bar'\n"
    atok = asttokens.ASTTokens(source)
    self.assertEqual(atok.text, source)
    self.assertEqual([str(t) for t in atok.tokens], [
        "NAME:'import'",
        "NAME:'re'",
        "COMMENT:'# comment'",
        "NEWLINE:'\\n'",
        "NL:'\\n'",
        "NAME:'foo'",
        "OP:'='",
        'STRING:"\'bar\'"',
        "NEWLINE:'\\n'",
        "ENDMARKER:''"
    ])

    self.assertEqual(atok.tokens[5].type, token.NAME)
    self.assertEqual(atok.tokens[5].string, 'foo')
    self.assertEqual(atok.tokens[5].index, 5)
    self.assertEqual(atok.tokens[5].startpos, 22)
    self.assertEqual(atok.tokens[5].endpos, 25)
Example #11
Source File: asttokens.py From asttokens with Apache License 2.0
def next_token(self, tok, include_extra=False):
    """
    Returns the next token after the given one. If include_extra is True, includes
    non-coding tokens from the tokenize module, such as NL and COMMENT.
    """
    i = tok.index + 1
    if not include_extra:
        while is_non_coding_token(self._tokens[i].type):
            i += 1
    return self._tokens[i]
Example #12
Source File: obfuscate.py From shellsploit-library with MIT License
def insert_in_next_line(tokens, index, string):
    """
    Inserts the given string after the next newline inside tokens starting at
    *tokens[index]*.  Indents must be a list of indentation tokens that will
    preceeed the insert (can be an empty list).
    """
    tokenized_string = token_utils.listified_tokenizer(string)
    for i, tok in list(enumerate(tokens[index:])):
        token_type = tok[0]
        if token_type in [tokenize.NL, tokenize.NEWLINE]:
            for count, item in enumerate(tokenized_string):
                tokens.insert(index + count + i + 1, item)
            break
Example #13
Source File: pep8.py From cadquery-freecad-module with GNU Lesser General Public License v3.0
def whitespace_before_comment(logical_line, tokens):
    r"""Separate inline comments by at least two spaces.

    An inline comment is a comment on the same line as a statement.  Inline
    comments should be separated by at least two spaces from the statement.
    They should start with a # and a single space.

    Each line of a block comment starts with a # and a single space
    (unless it is indented text inside the comment).

    Okay: x = x + 1  # Increment x
    Okay: x = x + 1    # Increment x
    Okay: # Block comment
    E261: x = x + 1 # Increment x
    E262: x = x + 1  ## Increment x
    E262: x = x + 1  #!  Increment x
    E265: #Block comment
    E266: ### Block comment
    """
    prev_end = (0, 0)
    for token_type, text, start, end, line in tokens:
        if token_type == tokenize.COMMENT:
            inline_comment = line[:start[1]].strip()
            if inline_comment:
                if prev_end[0] == start[0] and start[1] < prev_end[1] + 2:
                    yield (prev_end,
                           "E261 at least two spaces before inline comment")
            symbol, sp, comment = text.partition(' ')
            bad_prefix = symbol not in '#:' and (symbol.lstrip('#')[:1] or '#')
            if inline_comment:
                if bad_prefix or comment[:1] in WHITESPACE:
                    yield start, "E262 inline comment should start with '# '"
            elif bad_prefix and (bad_prefix != '!' or start[0] > 1):
                if bad_prefix != '#':
                    yield start, "E265 block comment should start with '# '"
                elif comment:
                    yield start, "E266 too many leading '#' for block comment"
        elif token_type != tokenize.NL:
            prev_end = end
Example #14
Source File: asttokens.py From asttokens with Apache License 2.0
def prev_token(self, tok, include_extra=False):
    """
    Returns the previous token before the given one. If include_extra is True, includes
    non-coding tokens from the tokenize module, such as NL and COMMENT.
    """
    i = tok.index - 1
    if not include_extra:
        while is_non_coding_token(self._tokens[i].type):
            i -= 1
    return self._tokens[i]
Example #15
Source File: inspect.py From ironpython3 with Apache License 2.0
def tokeneater(self, type, token, srowcol, erowcol, line):
    if not self.started:
        # look for the first "def", "class" or "lambda"
        if token in ("def", "class", "lambda"):
            if token == "lambda":
                self.islambda = True
            self.started = True
        self.passline = True    # skip to the end of the line
    elif type == tokenize.NEWLINE:
        self.passline = False   # stop skipping when a NEWLINE is seen
        self.last = srowcol[0]
        if self.islambda:       # lambdas always end at the first NEWLINE
            raise EndOfBlock
    elif self.passline:
        pass
    elif type == tokenize.INDENT:
        self.indent = self.indent + 1
        self.passline = True
    elif type == tokenize.DEDENT:
        self.indent = self.indent - 1
        # the end of matching indent/dedent pairs end a block
        # (note that this only works for "def"/"class" blocks,
        #  not e.g. for "if: else:" or "try: finally:" blocks)
        if self.indent <= 0:
            raise EndOfBlock
    elif self.indent == 0 and type not in (tokenize.COMMENT, tokenize.NL):
        # any other token on the same indentation level end the previous
        # block as well, except the pseudo-tokens COMMENT and NL.
        raise EndOfBlock
Example #16
Source File: offset_token.py From Jandroid with BSD 3-Clause "New" or "Revised" License
def Untokenize(offset_tokens):
    """Return the string representation of an iterable of OffsetTokens."""
    # Make a copy. Don't modify the original.
    offset_tokens = collections.deque(offset_tokens)

    # Strip leading NL tokens.
    while offset_tokens[0].type == tokenize.NL:
        offset_tokens.popleft()

    # Strip leading vertical whitespace.
    first_token = offset_tokens.popleft()
    # Take care not to modify the existing token. Create a new one in its place.
    first_token = OffsetToken(first_token.type, first_token.string,
                              (0, first_token.offset[1]))
    offset_tokens.appendleft(first_token)

    # Convert OffsetTokens to tokenize tokens.
    tokenize_tokens = []
    row = 1
    col = 0
    for t in offset_tokens:
        offset_row, offset_col = t.offset
        if offset_row == 0:
            col += offset_col
        else:
            row += offset_row
            col = offset_col
        tokenize_tokens.append((t.type, t.string, (row, col), (row, col), None))

    # tokenize can't handle whitespace before line continuations.
    # So add a space.
    return tokenize.untokenize(tokenize_tokens).replace('\\\n', ' \\\n')
Example #17
Source File: asttokens.py From asttokens with Apache License 2.0
def token_range(self, first_token, last_token, include_extra=False):
    """
    Yields all tokens in order from first_token through and including last_token.
    If include_extra is True, includes non-coding tokens such as tokenize.NL and .COMMENT.
    """
    for i in xrange(first_token.index, last_token.index + 1):
        if include_extra or not is_non_coding_token(self._tokens[i].type):
            yield self._tokens[i]
Example #18
Source File: asttokens.py From asttokens with Apache License 2.0
def get_tokens(self, node, include_extra=False):
    """
    Yields all tokens making up the given node. If include_extra is True, includes
    non-coding tokens such as tokenize.NL and .COMMENT.
    """
    return self.token_range(node.first_token, node.last_token,
                            include_extra=include_extra)
Example #19
Source File: test_asttokens.py From asttokens with Apache License 2.0
def test_coding_declaration(self):
    """ASTTokens should be able to parse a string with a coding declaration."""
    # In Python 2, a unicode string with a coding declaration is a SyntaxError, but we should be
    # able to parse a byte string with a coding declaration (as long as its utf-8 compatible).
    atok = asttokens.ASTTokens(str("# coding: ascii\n1\n"), parse=True)
    self.assertEqual([str(t) for t in atok.tokens], [
        "COMMENT:'# coding: ascii'",
        "NL:'\\n'",
        "NUMBER:'1'",
        "NEWLINE:'\\n'",
        "ENDMARKER:''"
    ])
Example #20
Source File: snippet.py From Jandroid with BSD 3-Clause "New" or "Revised" License
def _SnippetizeNode(node, tokens):
    # The parser module gives a syntax tree that discards comments,
    # non-terminating newlines, and whitespace information. Use the tokens given
    # by the tokenize module to annotate the syntax tree with the information
    # needed to exactly reproduce the original source code.
    node_type = node[0]

    if node_type >= token.NT_OFFSET:
        # Symbol.
        children = tuple(_SnippetizeNode(child, tokens) for child in node[1:])
        return Symbol(node_type, children)
    else:
        # Token.
        grabbed_tokens = []
        while tokens and (tokens[0].type == tokenize.COMMENT or
                          tokens[0].type == tokenize.NL):
            grabbed_tokens.append(tokens.popleft())

        # parser has 2 NEWLINEs right before the end.
        # tokenize has 0 or 1 depending on if the file has one.
        # Create extra nodes without consuming tokens to account for this.
        if node_type == token.NEWLINE:
            for tok in tokens:
                if tok.type == token.ENDMARKER:
                    return TokenSnippet(node_type, grabbed_tokens)
                if tok.type != token.DEDENT:
                    break

        assert tokens[0].type == token.OP or node_type == tokens[0].type

        grabbed_tokens.append(tokens.popleft())
        return TokenSnippet(node_type, grabbed_tokens)
Example #21
Source File: QuestParser.py From Pirates-Online-Rewritten with BSD 3-Clause "New" or "Revised" License
def getLineOfTokens(gen):
    tokens = []
    nextNeg = 0
    token = gen.next()
    if token[0] == tokenize.ENDMARKER:
        return None
    while token[0] != tokenize.NEWLINE and token[0] != tokenize.NL:
        if token[0] == tokenize.COMMENT:
            pass
        elif token[0] == tokenize.OP and token[1] == '-':
            nextNeg = 1
        elif token[0] == tokenize.NUMBER:
            if nextNeg:
                tokens.append(-eval(token[1]))
                nextNeg = 0
            else:
                tokens.append(eval(token[1]))
        elif token[0] == tokenize.STRING:
            tokens.append(eval(token[1]))
        elif token[0] == tokenize.NAME:
            tokens.append(token[1])
        else:
            notify.warning('Ignored token type: %s on line: %s' % (tokenize.tok_name[token[0]], token[2][0]))
        token = gen.next()

    return tokens
Example #22
Source File: reindent.py From python3_ios with BSD 3-Clause "New" or "Revised" License
def tokeneater(self, type, token, slinecol, end, line,
               INDENT=tokenize.INDENT,
               DEDENT=tokenize.DEDENT,
               NEWLINE=tokenize.NEWLINE,
               COMMENT=tokenize.COMMENT,
               NL=tokenize.NL):

    if type == NEWLINE:
        # A program statement, or ENDMARKER, will eventually follow,
        # after some (possibly empty) run of tokens of the form
        #     (NL | COMMENT)* (INDENT | DEDENT+)?
        self.find_stmt = 1

    elif type == INDENT:
        self.find_stmt = 1
        self.level += 1

    elif type == DEDENT:
        self.find_stmt = 1
        self.level -= 1

    elif type == COMMENT:
        if self.find_stmt:
            self.stats.append((slinecol[0], -1))
            # but we're still looking for a new stmt, so leave
            # find_stmt alone

    elif type == NL:
        pass

    elif self.find_stmt:
        # This is the first "real token" following a NEWLINE, so it
        # must be the first token of the next program statement, or an
        # ENDMARKER.
        self.find_stmt = 0
        if line:   # not endmarker
            self.stats.append((slinecol[0], self.level))

    # Count number of leading blanks.
Example #23
Source File: inspect.py From pmatic with GNU General Public License v2.0
def tokeneater(self, type, token, srow_scol, erow_ecol, line):
    srow, scol = srow_scol
    erow, ecol = erow_ecol
    if not self.started:
        # look for the first "def", "class" or "lambda"
        if token in ("def", "class", "lambda"):
            if token == "lambda":
                self.islambda = True
            self.started = True
        self.passline = True    # skip to the end of the line
    elif type == tokenize.NEWLINE:
        self.passline = False   # stop skipping when a NEWLINE is seen
        self.last = srow
        if self.islambda:       # lambdas always end at the first NEWLINE
            raise EndOfBlock
    elif self.passline:
        pass
    elif type == tokenize.INDENT:
        self.indent = self.indent + 1
        self.passline = True
    elif type == tokenize.DEDENT:
        self.indent = self.indent - 1
        # the end of matching indent/dedent pairs end a block
        # (note that this only works for "def"/"class" blocks,
        #  not e.g. for "if: else:" or "try: finally:" blocks)
        if self.indent <= 0:
            raise EndOfBlock
    elif self.indent == 0 and type not in (tokenize.COMMENT, tokenize.NL):
        # any other token on the same indentation level end the previous
        # block as well, except the pseudo-tokens COMMENT and NL.
        raise EndOfBlock
Example #24
Source File: reindent.py From Upgrade-to-Python3 with Apache License 2.0
def tokeneater(self, type, token, slinecol, end, line,
               INDENT=tokenize.INDENT,
               DEDENT=tokenize.DEDENT,
               NEWLINE=tokenize.NEWLINE,
               COMMENT=tokenize.COMMENT,
               NL=tokenize.NL):

    if type == NEWLINE:
        # A program statement, or ENDMARKER, will eventually follow,
        # after some (possibly empty) run of tokens of the form
        #     (NL | COMMENT)* (INDENT | DEDENT+)?
        self.find_stmt = 1

    elif type == INDENT:
        self.find_stmt = 1
        self.level += 1

    elif type == DEDENT:
        self.find_stmt = 1
        self.level -= 1

    elif type == COMMENT:
        if self.find_stmt:
            self.stats.append((slinecol[0], -1))
            # but we're still looking for a new stmt, so leave
            # find_stmt alone

    elif type == NL:
        pass

    elif self.find_stmt:
        # This is the first "real token" following a NEWLINE, so it
        # must be the first token of the next program statement, or an
        # ENDMARKER.
        self.find_stmt = 0
        if line:   # not endmarker
            self.stats.append((slinecol[0], self.level))

    # Count number of leading blanks.
Example #25
Source File: pycodestyle.py From PyDev.Debugger with Eclipse Public License 1.0
def whitespace_before_comment(logical_line, tokens):
    r"""Separate inline comments by at least two spaces.

    An inline comment is a comment on the same line as a statement.  Inline
    comments should be separated by at least two spaces from the statement.
    They should start with a # and a single space.

    Each line of a block comment starts with a # and a single space
    (unless it is indented text inside the comment).

    Okay: x = x + 1  # Increment x
    Okay: x = x + 1    # Increment x
    Okay: # Block comment
    E261: x = x + 1 # Increment x
    E262: x = x + 1  ## Increment x
    E262: x = x + 1  #!  Increment x
    E265: #Block comment
    E266: ### Block comment
    """
    prev_end = (0, 0)
    for token_type, text, start, end, line in tokens:
        if token_type == tokenize.COMMENT:
            inline_comment = line[:start[1]].strip()
            if inline_comment:
                if prev_end[0] == start[0] and start[1] < prev_end[1] + 2:
                    yield (prev_end,
                           "E261 at least two spaces before inline comment")
            symbol, sp, comment = text.partition(' ')
            bad_prefix = symbol not in '#:' and (symbol.lstrip('#')[:1] or '#')
            if inline_comment:
                if bad_prefix or comment[:1] in WHITESPACE:
                    yield start, "E262 inline comment should start with '# '"
            elif bad_prefix and (bad_prefix != '!' or start[0] > 1):
                if bad_prefix != '#':
                    yield start, "E265 block comment should start with '# '"
                elif comment:
                    yield start, "E266 too many leading '#' for block comment"
        elif token_type != tokenize.NL:
            prev_end = end
Example #26
Source File: reindent.py From oss-ftp with MIT License
def tokeneater(self, type, token, (sline, scol), end, line,
               INDENT=tokenize.INDENT,
               DEDENT=tokenize.DEDENT,
               NEWLINE=tokenize.NEWLINE,
               COMMENT=tokenize.COMMENT,
               NL=tokenize.NL):
Example #27
Source File: pt2html.py From pyx with GNU General Public License v2.0
def tokeneater(self, toktype, toktext, xxx_todo_changeme, xxx_todo_changeme1, line):
    (srow, scol) = xxx_todo_changeme
    (erow, ecol) = xxx_todo_changeme1
    if toktype == token.ERRORTOKEN:
        raise RuntimeError("ErrorToken occured")

    if toktype in [token.NEWLINE, tokenize.NL]:
        self.output.write('\n')
        self.col = 0
    else:
        # map token type to a color group
        if token.LPAR <= toktype and toktype <= token.OP:
            toktype = token.OP
        elif toktype == token.NAME and keyword.iskeyword(toktext):
            toktype = _KEYWORD

        # restore whitespace
        assert scol >= self.col
        self.output.write(" "*(scol-self.col))

        try:
            tokclass = tokclasses[toktype]
        except KeyError:
            tokclass = None
        if self.tokclass is not None and tokclass != self.tokclass:
            self.output.write('</span>')
        if tokclass is not None and tokclass != self.tokclass:
            self.output.write('<span class="%s">' % tokclass)
        self.output.write(cgi.escape(toktext))
        self.tokclass = tokclass

        # calculate new column position
        self.col = scol + len(toktext)
        newline = toktext.rfind("\n")
        if newline != -1:
            self.col = len(toktext) - newline - 1
Example #28
Source File: inspect.py From jawfish with MIT License
def tokeneater(self, type, token, srowcol, erowcol, line):
    if not self.started:
        # look for the first "def", "class" or "lambda"
        if token in ("def", "class", "lambda"):
            if token == "lambda":
                self.islambda = True
            self.started = True
        self.passline = True    # skip to the end of the line
    elif type == tokenize.NEWLINE:
        self.passline = False   # stop skipping when a NEWLINE is seen
        self.last = srowcol[0]
        if self.islambda:       # lambdas always end at the first NEWLINE
            raise EndOfBlock
    elif self.passline:
        pass
    elif type == tokenize.INDENT:
        self.indent = self.indent + 1
        self.passline = True
    elif type == tokenize.DEDENT:
        self.indent = self.indent - 1
        # the end of matching indent/dedent pairs end a block
        # (note that this only works for "def"/"class" blocks,
        #  not e.g. for "if: else:" or "try: finally:" blocks)
        if self.indent <= 0:
            raise EndOfBlock
    elif self.indent == 0 and type not in (tokenize.COMMENT, tokenize.NL):
        # any other token on the same indentation level end the previous
        # block as well, except the pseudo-tokens COMMENT and NL.
        raise EndOfBlock
Example #29
Source File: inspect.py From BinderFilter with MIT License
def tokeneater(self, type, token, srow_scol, erow_ecol, line):
    srow, scol = srow_scol
    erow, ecol = erow_ecol
    if not self.started:
        # look for the first "def", "class" or "lambda"
        if token in ("def", "class", "lambda"):
            if token == "lambda":
                self.islambda = True
            self.started = True
        self.passline = True    # skip to the end of the line
    elif type == tokenize.NEWLINE:
        self.passline = False   # stop skipping when a NEWLINE is seen
        self.last = srow
        if self.islambda:       # lambdas always end at the first NEWLINE
            raise EndOfBlock
    elif self.passline:
        pass
    elif type == tokenize.INDENT:
        self.indent = self.indent + 1
        self.passline = True
    elif type == tokenize.DEDENT:
        self.indent = self.indent - 1
        # the end of matching indent/dedent pairs end a block
        # (note that this only works for "def"/"class" blocks,
        #  not e.g. for "if: else:" or "try: finally:" blocks)
        if self.indent <= 0:
            raise EndOfBlock
    elif self.indent == 0 and type not in (tokenize.COMMENT, tokenize.NL):
        # any other token on the same indentation level end the previous
        # block as well, except the pseudo-tokens COMMENT and NL.
        raise EndOfBlock
Example #30
Source File: inspect.py From Computable with MIT License
def tokeneater(self, type, token, srow_scol, erow_ecol, line):
    srow, scol = srow_scol
    erow, ecol = erow_ecol
    if not self.started:
        # look for the first "def", "class" or "lambda"
        if token in ("def", "class", "lambda"):
            if token == "lambda":
                self.islambda = True
            self.started = True
        self.passline = True    # skip to the end of the line
    elif type == tokenize.NEWLINE:
        self.passline = False   # stop skipping when a NEWLINE is seen
        self.last = srow
        if self.islambda:       # lambdas always end at the first NEWLINE
            raise EndOfBlock
    elif self.passline:
        pass
    elif type == tokenize.INDENT:
        self.indent = self.indent + 1
        self.passline = True
    elif type == tokenize.DEDENT:
        self.indent = self.indent - 1
        # the end of matching indent/dedent pairs end a block
        # (note that this only works for "def"/"class" blocks,
        #  not e.g. for "if: else:" or "try: finally:" blocks)
        if self.indent <= 0:
            raise EndOfBlock
    elif self.indent == 0 and type not in (tokenize.COMMENT, tokenize.NL):
        # any other token on the same indentation level end the previous
        # block as well, except the pseudo-tokens COMMENT and NL.
        raise EndOfBlock