Python elasticsearch_dsl.Keyword() Examples
The following are 8 code examples of elasticsearch_dsl.Keyword().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions and classes of the
elasticsearch_dsl module, or try the search function.
Example #1
Source File: elasticsearch.py From qb with MIT License | 6 votes |
def create_doctype(index_name, similarity):
    """Build and return an ``Answer`` Document class bound to *index_name*.

    When *similarity* is ``'default'`` the content fields are created with no
    explicit similarity (ES default); otherwise *similarity* is passed through
    to the ``Text`` fields for ``wiki_content`` and ``qb_content``.
    """
    # Collapse the two-way branch into a single kwargs dict.
    text_kwargs = {} if similarity == 'default' else {'similarity': similarity}
    wiki_content_field = Text(**text_kwargs)
    qb_content_field = Text(**text_kwargs)

    class Answer(Document):
        # 'page.raw' keeps an exact (keyword) copy alongside the analyzed text.
        page = Text(fields={'raw': Keyword()})
        wiki_content = wiki_content_field
        qb_content = qb_content_field

        class Meta:
            index = index_name

    return Answer
Example #2
Source File: elasticsearch.py From optimade-python-tools with MIT License | 6 votes |
def __init__(
    self,
    name,
    es_field: str = None,
    elastic_mapping_type: Field = None,
    length_quantity: "Quantity" = None,
    has_only_quantity: "Quantity" = None,
    nested_quantity: "Quantity" = None,
):
    """Describe a filterable quantity and how it maps onto Elasticsearch.

    ``es_field`` defaults to *name* when not given, and the mapping type
    defaults to ``Keyword``. The three optional ``*_quantity`` arguments link
    this quantity to related quantities (length / HAS ONLY / nested lookups).
    """
    self.name = name
    # Fall back to the quantity name when no explicit ES field is supplied.
    self.es_field = name if es_field is None else es_field
    if elastic_mapping_type is None:
        elastic_mapping_type = Keyword
    self.elastic_mapping_type = elastic_mapping_type
    self.length_quantity = length_quantity
    self.has_only_quantity = has_only_quantity
    self.nested_quantity = nested_quantity
Example #3
Source File: elasticsearch.py From optimade-python-tools with MIT License | 6 votes |
def _query(self, quantity, o, value, nested=None):
    """Translate one (quantity, operator, value) triple into an ES ``Q`` query.

    Comparison operators become ``range`` queries; equality-style operators
    become ``match`` (for Text mappings) or ``term`` (for Keyword/Integer)
    queries, negated for ``!=``. Raises for unsupported mapping types or
    unknown operators.
    """
    field = self._field(quantity, nested=nested)

    # Comparison operators (<, <=, >, >=, ...) map straight onto a range query.
    if o in _cmp_operators:
        return Q("range", **{field: {_cmp_operators[o]: value}})

    if quantity.elastic_mapping_type == Text:
        query_type = "match"
    elif quantity.elastic_mapping_type in (Keyword, Integer):
        query_type = "term"
    else:
        raise NotImplementedError("Quantity has unsupported ES field type")

    if o in ["=", ""]:
        return Q(query_type, **{field: value})
    if o == "!=":
        # pylint: disable=invalid-unary-operand-type
        return ~Q(query_type, **{field: value})

    raise Exception("Unknown operator %s" % o)
Example #4
Source File: test_mapping.py From elasticsearch-dsl-py with Apache License 2.0 | 5 votes |
def test_properties_can_iterate_over_all_the_fields():
    """_collect_fields must yield every field, including multi-fields and
    fields nested inside Nested properties."""
    mp = mapping.Mapping()
    mp.field('f1', 'text', test_attr='f1', fields={'f2': Keyword(test_attr='f2')})
    mp.field('f3', Nested(test_attr='f3', properties={'f4': Text(test_attr='f4')}))

    collected = {f.test_attr for f in mp.properties._collect_fields()}
    assert collected == {'f1', 'f2', 'f3', 'f4'}
Example #5
Source File: test_mapping.py From elasticsearch-dsl-py with Apache License 2.0 | 5 votes |
def test_mapping_can_collect_multiple_analyzers():
    """_collect_analysis must gather analyzers, filters and tokenizers from
    both top-level fields and multi-fields (including search_analyzer)."""
    a1 = analysis.analyzer(
        'my_analyzer1',
        tokenizer='keyword',
        filter=[
            'lowercase',
            analysis.token_filter('my_filter1', 'stop', stopwords=['a', 'b']),
        ],
    )
    a2 = analysis.analyzer(
        'my_analyzer2',
        tokenizer=analysis.tokenizer('trigram', 'nGram', min_gram=3, max_gram=3),
        filter=[analysis.token_filter('my_filter2', 'stop', stopwords=['c', 'd'])],
    )

    mp = mapping.Mapping()
    mp.field('title', 'text', analyzer=a1, search_analyzer=a2)
    mp.field(
        'text', 'text', analyzer=a1,
        fields={
            'english': Text(analyzer=a1),
            'unknown': Keyword(analyzer=a1, search_analyzer=a2),
        }
    )

    expected = {
        'analyzer': {
            'my_analyzer1': {'filter': ['lowercase', 'my_filter1'],
                             'tokenizer': 'keyword',
                             'type': 'custom'},
            'my_analyzer2': {'filter': ['my_filter2'],
                             'tokenizer': 'trigram',
                             'type': 'custom'},
        },
        'filter': {
            'my_filter1': {'stopwords': ['a', 'b'], 'type': 'stop'},
            'my_filter2': {'stopwords': ['c', 'd'], 'type': 'stop'},
        },
        'tokenizer': {'trigram': {'max_gram': 3, 'min_gram': 3, 'type': 'nGram'}},
    }
    assert expected == mp._collect_analysis()
Example #6
Source File: test_mapping.py From elasticsearch-dsl-py with Apache License 2.0 | 5 votes |
def test_resolve_field_can_resolve_multifields():
    """resolve_field must follow a dotted path into a multi-field."""
    mp = mapping.Mapping()
    mp.field('title', 'text', fields={'keyword': Keyword()})

    assert isinstance(mp.resolve_field('title.keyword'), Keyword)
Example #7
Source File: test_mapping.py From elasticsearch-dsl-py with Apache License 2.0 | 5 votes |
def test_resolve_nested():
    """resolve_nested must return the chain of enclosing nested paths plus the
    resolved leaf field; a non-nested field yields an empty chain."""
    mp = mapping.Mapping()
    mp.field('n1', 'nested', properties={'n2': Nested(properties={'k1': Keyword()})})
    mp.field('k2', 'keyword')

    nested_paths, leaf = mp.resolve_nested('n1.n2.k1')
    assert nested_paths == ['n1', 'n1.n2']
    assert isinstance(leaf, Keyword)

    nested_paths, leaf = mp.resolve_nested('k2')
    assert nested_paths == []
    assert isinstance(leaf, Keyword)
Example #8
Source File: test_mapping.py From elasticsearch-dsl-py with Apache License 2.0 | 4 votes |
def test_mapping_can_collect_all_analyzers_and_normalizers():
    """_collect_analysis must gather custom analyzers, normalizers, filters and
    tokenizers from fields, multi-fields, nested properties and meta — while
    skipping built-in ('english') and unknown non-custom definitions. The
    resulting mapping must also survive a JSON round trip unchanged."""
    a1 = analysis.analyzer(
        'my_analyzer1',
        tokenizer='keyword',
        filter=[
            'lowercase',
            analysis.token_filter('my_filter1', 'stop', stopwords=['a', 'b']),
        ],
    )
    a2 = analysis.analyzer('english')
    a3 = analysis.analyzer('unknown_custom')
    a4 = analysis.analyzer(
        'my_analyzer2',
        tokenizer=analysis.tokenizer('trigram', 'nGram', min_gram=3, max_gram=3),
        filter=[analysis.token_filter('my_filter2', 'stop', stopwords=['c', 'd'])],
    )
    a5 = analysis.analyzer('my_analyzer3', tokenizer='keyword')
    n1 = analysis.normalizer('my_normalizer1', filter=['lowercase'])
    n2 = analysis.normalizer(
        'my_normalizer2',
        filter=[
            'my_filter1',
            'my_filter2',
            analysis.token_filter('my_filter3', 'stop', stopwords=['e', 'f']),
        ],
    )
    n3 = analysis.normalizer('unknown_custom')

    mp = mapping.Mapping()
    mp.field(
        'title', 'text', analyzer=a1,
        fields={
            'english': Text(analyzer=a2),
            'unknown': Keyword(search_analyzer=a3),
        }
    )
    mp.field('comments', Nested(properties={'author': Text(analyzer=a4)}))
    mp.field('normalized_title', 'keyword', normalizer=n1)
    mp.field('normalized_comment', 'keyword', normalizer=n2)
    mp.field('unknown', 'keyword', normalizer=n3)
    mp.meta('_all', analyzer=a5)

    expected = {
        'analyzer': {
            'my_analyzer1': {'filter': ['lowercase', 'my_filter1'],
                             'tokenizer': 'keyword',
                             'type': 'custom'},
            'my_analyzer2': {'filter': ['my_filter2'],
                             'tokenizer': 'trigram',
                             'type': 'custom'},
            'my_analyzer3': {'tokenizer': 'keyword', 'type': 'custom'},
        },
        'normalizer': {
            'my_normalizer1': {'filter': ['lowercase'], 'type': 'custom'},
            'my_normalizer2': {'filter': ['my_filter1', 'my_filter2', 'my_filter3'],
                               'type': 'custom'},
        },
        'filter': {
            'my_filter1': {'stopwords': ['a', 'b'], 'type': 'stop'},
            'my_filter2': {'stopwords': ['c', 'd'], 'type': 'stop'},
            'my_filter3': {'stopwords': ['e', 'f'], 'type': 'stop'},
        },
        'tokenizer': {
            'trigram': {'max_gram': 3, 'min_gram': 3, 'type': 'nGram'},
        },
    }
    assert expected == mp._collect_analysis()

    # The serialized mapping must be plain JSON-compatible data.
    assert json.loads(json.dumps(mp.to_dict())) == mp.to_dict()