Python pygments.token.Whitespace() Examples
The following are 6 code examples of pygments.token.Whitespace().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions and classes of the
module pygments.token, or try the search function.
Example #1
Source File: test_sql.py From pygments with BSD 2-Clause "Simplified" License | 5 votes |
def _assert_are_tokens_of_type(lexer, examples, expected_token_type):
    """Assert that every whitespace-separated example lexes to exactly one
    non-whitespace token of *expected_token_type*.

    :param lexer: a pygments lexer providing ``get_tokens(text)``
    :param examples: whitespace-separated string of code snippets, each of
        which must lex to a single token
    :param expected_token_type: the pygments token type every example must
        produce
    :raises AssertionError: if any example yields a token of a different
        type, or more/fewer than one non-whitespace token
    """
    for test_number, example in enumerate(examples.split(), 1):
        token_count = 0
        for token_type, token_value in lexer.get_tokens(example):
            # Whitespace tokens (e.g. the trailing newline pygments appends)
            # are ignored for both the count and the type check.
            if token_type != Whitespace:
                token_count += 1
                # Fixed garbled message: "is be %s" -> "is %s".
                assert token_type == expected_token_type, \
                    'token_type #%d for %s is %s but must be %s' % \
                    (test_number, token_value, token_type, expected_token_type)
        assert token_count == 1, \
            '%s must yield exactly 1 token instead of %d' % \
            (example, token_count)
Example #2
Source File: test_sql.py From pygments with BSD 2-Clause "Simplified" License | 5 votes |
def _assert_tokens_match(lexer, text, expected_tokens_without_trailing_newline):
    """Assert that lexing *text* yields exactly the expected tokens, after
    discarding a single trailing ``(Whitespace, '\\n')`` token if present.

    :raises AssertionError: if the (trimmed) actual tokens differ from
        ``expected_tokens_without_trailing_newline``
    """
    tokens = tuple(lexer.get_tokens(text))
    # pygments guarantees a trailing newline token; strip it so callers
    # do not have to spell it out in their expectations.
    if tokens and tokens[-1] == (Whitespace, '\n'):
        tokens = tokens[:-1]
    assert expected_tokens_without_trailing_newline == tokens, \
        'text must yield expected tokens: %s' % text
Example #3
Source File: test_basic.py From pygments with BSD 2-Clause "Simplified" License | 5 votes |
def assert_are_tokens_of_type(lexer, examples, expected_token_type):
    """Assert that every whitespace-separated example lexes to exactly one
    non-whitespace token of *expected_token_type*.

    :param lexer: a pygments lexer providing ``get_tokens(text)``
    :param examples: whitespace-separated string of code snippets, each of
        which must lex to a single token
    :param expected_token_type: the pygments token type every example must
        produce
    :raises AssertionError: if any example yields a token of a different
        type, or more/fewer than one non-whitespace token
    """
    for test_number, example in enumerate(examples.split(), 1):
        token_count = 0
        for token_type, token_value in lexer.get_tokens(example):
            # Whitespace tokens (e.g. the trailing newline pygments appends)
            # are ignored for both the count and the type check.
            if token_type != Whitespace:
                token_count += 1
                # Fixed garbled message: "is be %s" -> "is %s".
                assert token_type == expected_token_type, \
                    'token_type #%d for %s is %s but must be %s' % \
                    (test_number, token_value, token_type, expected_token_type)
        assert token_count == 1, \
            '%s must yield exactly 1 token instead of %d' % (example, token_count)
Example #4
Source File: test_basic.py From pygments with BSD 2-Clause "Simplified" License | 5 votes |
def assert_tokens_match(lexer, text, expected_tokens_without_trailing_newline):
    """Assert that lexing *text* yields exactly the expected tokens, after
    discarding a single trailing ``(Whitespace, '\\n')`` token if present.

    :raises AssertionError: if the (trimmed) actual tokens differ from
        ``expected_tokens_without_trailing_newline``
    """
    tokens = tuple(lexer.get_tokens(text))
    # pygments appends a final newline token; drop it so expectations
    # do not need to include it.
    if tokens and tokens[-1] == (Whitespace, '\n'):
        tokens = tokens[:-1]
    assert expected_tokens_without_trailing_newline == tokens, \
        'text must yield expected tokens: %s' % text
Example #5
Source File: pygments_sh.py From Turing with MIT License | 5 votes |
def _get_format(self, token):
    """Return a QTextCharFormat for *token*, or None.

    Whitespace tokens always use the editor's whitespace foreground;
    every other token's format is computed once from the current style
    and memoized in ``self._formats``.
    """
    if token == Whitespace:
        return self.editor.whitespaces_foreground
    # EAFP: look the token up in the cache, computing and storing the
    # format only on a miss.
    try:
        return self._formats[token]
    except KeyError:
        fmt = self._get_format_from_style(token, self._style)
        self._formats[token] = fmt
        return fmt
Example #6
Source File: pygments_sh.py From Turing with MIT License | 4 votes |
def highlight_block(self, text, block):
    """
    Highlights the block using a pygments lexer.

    :param text: text of the block to highlight
    :param block: block to highlight
    """
    # Rebuild the cached formats if the editor's color scheme changed
    # since the last call.
    if self.color_scheme.name != self._pygments_style:
        self._pygments_style = self.color_scheme.name
        self._update_style()
    # `text` is deliberately shadowed by the token loop below; keep the
    # original so the whitespace pass can use it afterwards.
    original_text = text
    if self.editor and self._lexer and self.enabled:
        if block.blockNumber():
            # Restore (or clear) the lexer's saved state stack from the
            # previous block so multi-line constructs lex correctly.
            prev_data = self._prev_block.userData()
            if prev_data:
                if hasattr(prev_data, "syntax_stack"):
                    self._lexer._saved_state_stack = prev_data.syntax_stack
                elif hasattr(self._lexer, '_saved_state_stack'):
                    del self._lexer._saved_state_stack
        # Lex the text using Pygments
        index = 0
        usd = block.userData()
        if usd is None:
            usd = TextBlockUserData()
            block.setUserData(usd)
        tokens = list(self._lexer.get_tokens(text))
        # NOTE: this loop rebinds `text` to each token's value.
        for token, text in tokens:
            length = len(text)
            fmt = self._get_format(token)
            # Strings/comments get a user object type — presumably so
            # other components can recognize those regions; TODO confirm.
            if token in [Token.Literal.String, Token.Literal.String.Doc,
                         Token.Comment]:
                fmt.setObjectType(fmt.UserObject)
            self.setFormat(index, length, fmt)
            index += length
        # Persist the lexer state on the block for the next block's pass.
        if hasattr(self._lexer, '_saved_state_stack'):
            setattr(usd, "syntax_stack", self._lexer._saved_state_stack)
            # Clean up for the next go-round.
            del self._lexer._saved_state_stack
        # spaces: re-scan the original text and apply the whitespace
        # format to every run of whitespace characters.
        text = original_text
        expression = QRegExp(r'\s+')
        index = expression.indexIn(text, 0)
        while index >= 0:
            index = expression.pos(0)
            length = len(expression.cap(0))
            self.setFormat(index, length, self._get_format(Whitespace))
            index = expression.indexIn(text, index + length)
        self._prev_block = block