Python sqlparse.tokens.Wildcard() Examples
The following are 12 code examples of sqlparse.tokens.Wildcard().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module sqlparse.tokens, or try the search function.
Example #1
Source File: grouping.py From SublimeText-SQLTools with GNU General Public License v3.0 | 7 votes |
def group_identifier_list(tlist):
    """Group comma-separated tokens in ``tlist`` into sql.IdentifierList nodes."""
    # Token classes/types that may legally sit on either side of a comma.
    keyword_match = T.Keyword, ('null', 'role')
    instance_classes = (sql.Function, sql.Case, sql.Identifier, sql.Comparison,
                        sql.IdentifierList, sql.Operation)
    token_types = (T_NUMERICAL + T_STRING + T_NAME
                   + (T.Keyword, T.Comment, T.Wildcard))

    def is_comma(token):
        return token.match(T.Punctuation, ',')

    def is_list_item(token):
        return imt(token, i=instance_classes, m=keyword_match, t=token_types)

    def span(tlist, pidx, tidx, nidx):
        # The grouped span runs from the previous valid token to the next.
        return pidx, nidx

    _group(tlist, sql.IdentifierList, is_comma,
           is_list_item, is_list_item, span, extend=True)
Example #2
Source File: grouping.py From SublimeText-SQLTools with GNU General Public License v3.0 | 6 votes |
def group_period(tlist):
    """Group dotted name sequences (``a.b``) into sql.Identifier nodes."""
    def is_dot(token):
        return token.match(T.Punctuation, '.')

    def prev_ok(token):
        # A period may follow a bracketed expression, an identifier,
        # a plain name or a quoted symbol.
        return imt(token,
                   i=(sql.SquareBrackets, sql.Identifier),
                   t=(T.Name, T.String.Symbol))

    def next_ok(token):
        # issue261: accept any next token here; the real validation
        # happens in finish() once the surrounding indices are known.
        return True

    def finish(tlist, pidx, tidx, nidx):
        # issue261: validate the trailing token now, and shrink the group
        # to end at the dot itself when the trailing token is invalid.
        trailing = tlist[nidx] if nidx is not None else None
        trailing_ok = imt(trailing,
                          i=(sql.SquareBrackets, sql.Function),
                          t=(T.Name, T.String.Symbol, T.Wildcard))
        return (pidx, nidx) if trailing_ok else (pidx, tidx)

    _group(tlist, sql.Identifier, is_dot, prev_ok, next_ok, finish)
Example #3
Source File: grouping.py From SublimeText-SQLTools with GNU General Public License v3.0 | 6 votes |
def group_operator(tlist):
    """Group arithmetic expressions (``a + b``) into sql.Operation nodes."""
    operand_types = T_NUMERICAL + T_STRING + T_NAME
    operand_classes = (sql.SquareBrackets, sql.Parenthesis, sql.Function,
                       sql.Identifier, sql.Operation)

    def is_operator(token):
        # '*' tokenizes as a wildcard but acts as multiplication here.
        return imt(token, t=(T.Operator, T.Wildcard))

    def is_operand(token):
        return imt(token, i=operand_classes, t=operand_types)

    def finish(tlist, pidx, tidx, nidx):
        # Normalize a wildcard '*' to a plain operator once grouped.
        tlist[tidx].ttype = T.Operator
        return pidx, nidx

    _group(tlist, sql.Operation, is_operator,
           is_operand, is_operand, finish, extend=False)
Example #4
Source File: sql.py From codenn with MIT License | 6 votes |
def _get_first_name(self, idx=None, reverse=False, keywords=False):
    """Return the name of the first name-like token, or ``None``.

    ``idx`` may be a token (converted to a position) or an int offset;
    ``reverse`` scans from the end; ``keywords`` also accepts keywords.
    """
    if idx and not isinstance(idx, int):
        idx = self.token_index(idx) + 1
    candidates = self.tokens[idx:] if idx else self.tokens
    if reverse:
        candidates = reversed(candidates)
    wanted = [T.Name, T.Wildcard, T.String.Symbol]
    if keywords:
        wanted.append(T.Keyword)
    for token in candidates:
        if token.ttype in wanted:
            return self._remove_quotes(token.value)
        if isinstance(token, (Identifier, Function)):
            return token.get_name()
    return None
Example #5
Source File: sql.py From uroboroSQL-formatter with BSD 3-Clause "New" or "Revised" License | 6 votes |
def _get_first_name(self, idx=None, reverse=False, keywords=False):
    """Return the name of the first token that carries a name, else ``None``."""
    # A non-int idx is a token; translate it to the position just after it.
    if idx and not isinstance(idx, int):
        idx = self.token_index(idx) + 1
    scan = self.tokens[idx:] if idx else self.tokens
    scan = reversed(scan) if reverse else scan
    name_types = (T.Name, T.Wildcard, T.String.Symbol)
    if keywords:
        name_types += (T.Keyword,)
    for tok in scan:
        if tok.ttype in name_types:
            return self._remove_quotes(tok.value)
        if isinstance(tok, (Identifier, Function)):
            return tok.get_name()
    return None
Example #6
Source File: sql.py From SublimeText-SQLTools with GNU General Public License v3.0 | 5 votes |
def _get_first_name(self, idx=None, reverse=False, keywords=False):
    """Return the name of the first name-bearing token, if any."""
    pool = self.tokens[idx:] if idx else self.tokens
    if reverse:
        pool = reversed(pool)
    accepted = [T.Name, T.Wildcard, T.String.Symbol]
    if keywords:
        accepted.append(T.Keyword)
    for candidate in pool:
        if candidate.ttype in accepted:
            return remove_quotes(candidate.value)
        if isinstance(candidate, (Identifier, Function)):
            return candidate.get_name()
Example #7
Source File: sql.py From SublimeText-SQLTools with GNU General Public License v3.0 | 5 votes |
def is_wildcard(self):
    """Return ``True`` if this identifier contains a wildcard."""
    # token_next_by returns an (index, token) pair; only the token matters.
    found = self.token_next_by(t=T.Wildcard)[1]
    return found is not None
Example #8
Source File: sql.py From codenn with MIT License | 5 votes |
def is_wildcard(self):
    """Return ``True`` if this identifier contains a wildcard."""
    # Search the whole token list from position 0 for a wildcard token.
    return self.token_next_by_type(0, T.Wildcard) is not None
Example #9
Source File: sql.py From uroboroSQL-formatter with BSD 3-Clause "New" or "Revised" License | 5 votes |
def is_wildcard(self):
    """Return ``True`` if this identifier contains a wildcard."""
    wildcard = self.token_next_by_type(0, T.Wildcard)
    return wildcard is not None
Example #10
Source File: grouping.py From codenn with MIT License | 4 votes |
def group_identifier_list(tlist):
    """Group comma-separated items in ``tlist`` into sql.IdentifierList
    nodes (e.g. a SELECT column list), recursing into sublists first.
    """
    # Depth-first: group inside sub-statements before this level; skip
    # sublists that are already IdentifierLists.
    [group_identifier_list(sgroup) for sgroup in tlist.get_sublists()
     if not isinstance(sgroup, sql.IdentifierList)]
    idx = 0
    # Allowed list items -- a token adjacent to a comma must satisfy at
    # least one of these predicates to count as part of a list.
    fend1_funcs = [lambda t: isinstance(t, (sql.Identifier, sql.Function,
                                            sql.Case)),
                   lambda t: t.is_whitespace(),
                   lambda t: t.ttype == T.Name,
                   lambda t: t.ttype == T.Wildcard,
                   lambda t: t.match(T.Keyword, 'null'),
                   lambda t: t.match(T.Keyword, 'role'),
                   lambda t: t.ttype == T.Number.Integer,
                   lambda t: t.ttype == T.String.Single,
                   lambda t: t.ttype == T.Name.Placeholder,
                   lambda t: t.ttype == T.Keyword,
                   lambda t: isinstance(t, sql.Comparison),
                   lambda t: isinstance(t, sql.Comment),
                   lambda t: t.ttype == T.Comment.Multiline,
                   ]
    tcomma = tlist.token_next_match(idx, T.Punctuation, ',')
    start = None  # first token of the candidate list currently being built
    while tcomma is not None:
        before = tlist.token_prev(tcomma)
        after = tlist.token_next(tcomma)
        # Check if the tokens around tcomma belong to a list: both
        # neighbours must pass at least one predicate.
        bpassed = apassed = False
        for func in fend1_funcs:
            if before is not None and func(before):
                bpassed = True
            if after is not None and func(after):
                apassed = True
        if not bpassed or not apassed:
            # Something's wrong here, skip ahead to next "," and drop
            # whatever partial list we had collected.
            start = None
            tcomma = tlist.token_next_match(tlist.token_index(tcomma) + 1,
                                            T.Punctuation, ',')
        else:
            if start is None:
                start = before
            next_ = tlist.token_next(after)
            if next_ is None or not next_.match(T.Punctuation, ','):
                # Reached the end of the list: wrap the collected span
                # into an IdentifierList group node.
                tokens = tlist.tokens_between(start, after)
                group = tlist.group_tokens(sql.IdentifierList, tokens)
                start = None
                # Resume scanning just past the freshly created group.
                tcomma = tlist.token_next_match(tlist.token_index(group) + 1,
                                                T.Punctuation, ',')
            else:
                # The list continues; advance to the following comma.
                tcomma = next_
Example #11
Source File: grouping.py From uroboroSQL-formatter with BSD 3-Clause "New" or "Revised" License | 4 votes |
def group_identifier_list(tlist): [group_identifier_list(sgroup) for sgroup in tlist.get_sublists() if not isinstance(sgroup, sql.IdentifierList)] # Allowed list items fend1_funcs = [lambda t: isinstance(t, (sql.Identifier, sql.Function, sql.Case)), lambda t: t.is_whitespace(), lambda t: t.ttype == T.Name, lambda t: t.ttype == T.Wildcard, lambda t: t.match(T.Keyword, 'null'), lambda t: t.match(T.Keyword, 'role'), lambda t: t.ttype == T.Number.Integer, lambda t: t.ttype == T.String.Single, lambda t: t.ttype == T.Name.Placeholder, lambda t: t.ttype == T.Keyword, lambda t: isinstance(t, sql.Comparison), lambda t: isinstance(t, sql.Comment), lambda t: t.ttype == T.Comment.Multiline, ] tcomma = tlist.token_next_match(0, T.Punctuation, ',') start = None while tcomma is not None: # Go back one idx to make sure to find the correct tcomma idx = tlist.token_index(tcomma) before = tlist.token_prev(idx) after = tlist.token_next(idx) # Check if the tokens around tcomma belong to a list bpassed = apassed = False for func in fend1_funcs: if before is not None and func(before): bpassed = True if after is not None and func(after): apassed = True if not bpassed or not apassed: # Something's wrong here, skip ahead to next "," start = None tcomma = tlist.token_next_match(idx + 1, T.Punctuation, ',') else: if start is None: start = before after_idx = tlist.token_index(after, start=idx) next_ = tlist.token_next(after_idx) if next_ is None or not next_.match(T.Punctuation, ','): # Reached the end of the list tokens = tlist.tokens_between(start, after) group = tlist.group_tokens(sql.IdentifierList, tokens) start = None tcomma = tlist.token_next_match(tlist.token_index(group) + 1, T.Punctuation, ',') else: tcomma = next_
Example #12
Source File: sql_metadata.py From sql-metadata with MIT License | 4 votes |
def get_query_columns(query: str) -> List[str]:
    """Return the unique column names referenced by ``query``.

    Handles ``table.column`` and ``table.*`` notation, and the column
    list of ``INSERT INTO t (col1, col2) ...`` statements.

    :type query str
    :rtype: list[str]
    """
    columns = []
    last_keyword = None  # last state-changing keyword seen (SELECT, WHERE, ...)
    last_token = None    # token from the previous loop iteration
    # print(preprocess_query(query))

    # these keywords should not change the state of a parser
    # and not "reset" previously found SELECT keyword
    keywords_ignored = ['AS', 'AND', 'OR', 'IN', 'IS', 'NOT', 'NOT NULL',
                        'LIKE', 'CASE', 'WHEN']
    # these function should be ignored
    # and not "reset" previously found SELECT keyword
    functions_ignored = ['COUNT', 'MIN', 'MAX', 'FROM_UNIXTIME', 'DATE_FORMAT',
                         'CAST', 'CONVERT']

    for token in get_query_tokens(query):
        if token.is_keyword and token.value.upper() not in keywords_ignored:
            # keep the name of the last keyword, e.g. SELECT, FROM, WHERE, (ORDER) BY
            last_keyword = token.value.upper()
            # print('keyword', last_keyword)
        elif token.ttype is Name:
            # analyze the name tokens, column names and where condition values
            if last_keyword in ['SELECT', 'WHERE', 'ORDER BY', 'ON'] \
                    and last_token.value.upper() not in ['AS']:
                # print(last_keyword, last_token, token.value)
                if token.value.upper() not in functions_ignored:
                    if str(last_token) == '.':
                        # print('DOT', last_token, columns[-1])
                        # we have table.column notation example
                        # append column name to the last entry of columns
                        # as it is a table name in fact
                        table_name = columns[-1]
                        columns[-1] = '{}.{}'.format(table_name, token)
                    else:
                        columns.append(str(token.value))
            elif last_keyword in ['INTO'] and last_token.ttype is Punctuation:
                # INSERT INTO `foo` (col1, `col2`) VALUES (..)
                # print(last_keyword, token, last_token)
                columns.append(str(token.value).strip('`'))
        elif token.ttype is Wildcard:
            # handle * wildcard in SELECT part, but ignore count(*)
            # print(last_keyword, last_token, token.value)
            if last_keyword == 'SELECT' and last_token.value != '(':
                if str(last_token) == '.':
                    # handle SELECT foo.*
                    table_name = columns[-1]
                    columns[-1] = '{}.{}'.format(table_name, str(token))
                else:
                    columns.append(str(token.value))
        last_token = token

    return unique(columns)