Python tokenize.ENCODING Examples
The following are 5 code examples of tokenize.ENCODING. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tokenize, or try the search function.
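Before diving into the project code, here is a minimal standalone sketch (not taken from any of the examples below) showing where the ENCODING token comes from: tokenize.tokenize() reads bytes and always emits it as the very first token.

import io
import tokenize

source = b"# -*- coding: utf-8 -*-\nx = 1\n"
tokens = tokenize.tokenize(io.BytesIO(source).readline)

# The ENCODING token is always the first one emitted; its string is the
# encoding name detected from the coding cookie, a BOM, or the UTF-8 default.
first = next(tokens)
assert first.type == tokenize.ENCODING
print(first.string)  # 'utf-8'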
Example #1
Source File: strings.py From python-netsurv with MIT License | 6 votes
def process_tokens(self, tokens):
    encoding = "ascii"
    for i, (tok_type, token, start, _, line) in enumerate(tokens):
        if tok_type == tokenize.ENCODING:
            # this is always the first token processed
            encoding = token
        elif tok_type == tokenize.STRING:
            # 'token' is the whole un-parsed token; we can look at the start
            # of it to see whether it's a raw or unicode string etc.
            self.process_string_token(token, start[0])
            # We figure the next token, ignoring comments & newlines:
            j = i + 1
            while j < len(tokens) and tokens[j].type in (
                tokenize.NEWLINE,
                tokenize.NL,
                tokenize.COMMENT,
            ):
                j += 1
            next_token = tokens[j] if j < len(tokens) else None
            if encoding != "ascii":
                # We convert `tokenize` character count into a byte count,
                # to match with astroid `.col_offset`
                start = (start[0], len(line[: start[1]].encode(encoding)))
            self.string_tokens[start] = (str_eval(token), next_token)
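The character-to-byte offset conversion at the end is the subtle part of this example. A standalone illustration (my own, not from the project) of why the two counts diverge on non-ASCII lines:

# A character offset and a byte offset differ once the line
# contains multi-byte characters.
line = "s = 'é' + 'x'"
col = line.index("'x'")                     # character column of the second string: 10
byte_col = len(line[:col].encode("utf-8"))
print(col, byte_col)                        # 10 11 -- 'é' occupies two bytes in UTF-8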
Example #2
Source File: unittest_checker_format.py From python-netsurv with MIT License | 5 votes
def test_encoding_token(self):
    """Make sure the encoding token doesn't change the checker's behavior

    _tokenize_str doesn't produce an encoding token, but reading a file does
    """
    with self.assertNoMessages():
        encoding_token = tokenize.TokenInfo(
            tokenize.ENCODING, "utf-8", (0, 0), (0, 0), ""
        )
        tokens = [encoding_token] + _tokenize_str(
            "if (\n None):\n pass\n"
        )
        self.checker.process_tokens(tokens)
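The docstring's claim is easy to verify. A quick sketch (mine, not part of the pylint test suite) showing that string-based tokenization never yields an ENCODING token while byte-based tokenization always does:

import io
import tokenize

code = "pass\n"

# generate_tokens() works on str and does not produce an ENCODING token...
str_tokens = list(tokenize.generate_tokens(io.StringIO(code).readline))
print(str_tokens[0].type == tokenize.ENCODING)   # False

# ...while tokenize() works on bytes and always produces one first.
byte_tokens = list(tokenize.tokenize(io.BytesIO(code.encode()).readline))
print(byte_tokens[0].type == tokenize.ENCODING)  # True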
Example #3
Source File: htmlizer.py From Safejumper-for-Desktop with GNU General Public License v2.0 | 5 votes
def printtoken(self, type, token, sCoordinates, eCoordinates, line):
    if hasattr(tokenize, "ENCODING") and type == tokenize.ENCODING:
        self.encoding = token
        return

    (srow, scol) = sCoordinates
    (erow, ecol) = eCoordinates
    if self.currentLine < srow:
        self.writer('\n' * (srow - self.currentLine))
        self.currentLine, self.currentCol = srow, 0
    self.writer(' ' * (scol - self.currentCol))
    if self.lastIdentifier:
        type = "identifier"
        self.parameters = 1
    elif type == tokenize.NAME:
        if keyword.iskeyword(token):
            type = 'keyword'
        else:
            if self.parameters:
                type = 'parameter'
            else:
                type = 'variable'
    else:
        type = tokenize.tok_name.get(type).lower()
    self.writer(token, type)
    self.currentCol = ecol
    self.currentLine += token.count('\n')
    if self.currentLine != erow:
        self.currentCol = 0
    self.lastIdentifier = token in ('def', 'class')
    if token == ':':
        self.parameters = 0
Example #4
Source File: htmlizer.py From learn_python3_spider with MIT License | 5 votes
def printtoken(self, type, token, sCoordinates, eCoordinates, line):
    if hasattr(tokenize, "ENCODING") and type == tokenize.ENCODING:
        self.encoding = token
        return

    if not isinstance(token, bytes):
        token = token.encode(self.encoding)

    (srow, scol) = sCoordinates
    (erow, ecol) = eCoordinates
    if self.currentLine < srow:
        self.writer(b'\n' * (srow - self.currentLine))
        self.currentLine, self.currentCol = srow, 0
    self.writer(b' ' * (scol - self.currentCol))
    if self.lastIdentifier:
        type = "identifier"
        self.parameters = 1
    elif type == tokenize.NAME:
        if keyword.iskeyword(token):
            type = 'keyword'
        else:
            if self.parameters:
                type = 'parameter'
            else:
                type = 'variable'
    else:
        type = tokenize.tok_name.get(type).lower()
    self.writer(token, type)
    self.currentCol = ecol
    self.currentLine += token.count(b'\n')
    if self.currentLine != erow:
        self.currentCol = 0
    self.lastIdentifier = token in (b'def', b'class')
    if token == b':':
        self.parameters = 0
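Example #4 differs from Example #3 only in writing bytes: the encoding remembered from the ENCODING token is reused to encode every str token before output. That conversion in isolation (assumed values, my own sketch):

encoding = "utf-8"   # remembered earlier from the ENCODING token
token = "café"
if not isinstance(token, bytes):
    token = token.encode(encoding)
print(token)         # b'caf\xc3\xa9'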
Example #5
Source File: _better_exceptions.py From loguru with MIT License | 4 votes
def highlight(self, source):
    style = self._style
    row, column = 0, 0
    output = ""

    for token in self.tokenize(source):
        type_, string, start, end, line = token

        if type_ == tokenize.NAME:
            if string in self._constants:
                color = style["constant"]
            elif keyword.iskeyword(string):
                color = style["keyword"]
            elif string in self._builtins:
                color = style["builtin"]
            else:
                color = style["identifier"]
        elif type_ == tokenize.OP:
            if string in self._punctation:
                color = style["punctuation"]
            else:
                color = style["operator"]
        elif type_ == tokenize.NUMBER:
            color = style["number"]
        elif type_ == tokenize.STRING:
            color = style["string"]
        elif type_ == tokenize.COMMENT:
            color = style["comment"]
        else:
            color = style["other"]

        start_row, start_column = start
        _, end_column = end

        if start_row != row:
            # Drop the part of the source already consumed on earlier rows.
            source = source[column:]
            row, column = start_row, 0

        if type_ != tokenize.ENCODING:
            output += line[column:start_column]
            output += color.format(string)

        column = end_column

    output += source[column:]

    return output
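This method iterates over self.tokenize(source), which loguru defines elsewhere in the same module. A plausible stand-in (an assumption on my part, not loguru's actual implementation) for experimenting with highlight() on its own:

import io
import tokenize

def tokenize_string(source):
    """Yield tokens for a source string; tolerate incomplete code."""
    readline = io.StringIO(source).readline
    try:
        yield from tokenize.generate_tokens(readline)
    except tokenize.TokenError:
        # Traceback snippets are often incomplete; stop at the first error.
        pass

Note that generate_tokens() never yields an ENCODING token, so the `type_ != tokenize.ENCODING` guard in highlight() only matters when a byte-based tokenizer such as tokenize.tokenize() is plugged in instead.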