Python elasticsearch_dsl.Nested() Examples
The following are 5 code examples of elasticsearch_dsl.Nested().
Each example notes the original project and source file it comes from.
You may also want to check out the other available functions and classes of the
elasticsearch_dsl module.
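Nested() declares an Elasticsearch nested field: an object field whose items are indexed as separate hidden documents so they can be queried and matched independently. As a minimal orientation sketch before the examples (assuming a recent elasticsearch_dsl release with the Document/InnerDoc API; the class and index names here are invented), a nested field is typically declared on a Document like this:

from elasticsearch_dsl import Document, InnerDoc, Nested, Text

class Comment(InnerDoc):
    author = Text()
    body = Text()

class Post(Document):
    title = Text()
    # each comment is indexed as its own nested document under 'comments'
    comments = Nested(Comment)

    class Index:
        name = 'posts'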
Example #1
Source File: views.py From django-seeker with BSD 2-Clause "Simplified" License
def get_field_sort(self, field_name):
    """
    Given a field name, returns the field name that should be used for sorting.
    If a mapping defines a .raw sub-field, that is used, otherwise the field
    name itself is used if index=not_analyzed.
    """
    if field_name.endswith('.raw'):
        return field_name
    if field_name in self.sort_fields:
        return self.sort_fields[field_name]
    if field_name in self.document._doc_type.mapping:
        dsl_field = self.document._doc_type.mapping[field_name]
        if isinstance(dsl_field, (dsl.Object, dsl.Nested)):
            return None
        if not isinstance(dsl_field, dsl.String):
            return field_name
        if 'raw' in dsl_field.fields:
            return '%s.raw' % field_name
        elif getattr(dsl_field, 'index', None) == 'not_analyzed':
            return field_name
    return None
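get_field_sort() only inspects the document's mapping, so its behaviour is easiest to see against a small one. A rough sketch (field names invented, and using the newer Text/Keyword field classes rather than the legacy dsl.String the view code checks for): a text field with a raw sub-field would sort on its .raw path, while Object/Nested fields are reported as unsortable.

from elasticsearch_dsl import Mapping, Nested, Text, Keyword

m = Mapping()
m.field('name', Text(fields={'raw': Keyword()}))  # sorting would use 'name.raw'
m.field('comments', Nested())                     # Object/Nested: no sort field (None)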
Example #2
Source File: test_mapping.py From elasticsearch-dsl-py with Apache License 2.0
def test_properties_can_iterate_over_all_the_fields():
    m = mapping.Mapping()
    m.field('f1', 'text', test_attr='f1', fields={'f2': Keyword(test_attr='f2')})
    m.field('f3', Nested(test_attr='f3', properties={'f4': Text(test_attr='f4')}))

    assert {'f1', 'f2', 'f3', 'f4'} == {f.test_attr for f in m.properties._collect_fields()}
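As a quick sanity check of what such a nested field serializes to (a sketch, not part of the test above; the exact dict can vary by elasticsearch_dsl version), a standalone Nested field reports type 'nested' and nests its properties:

from elasticsearch_dsl import Nested, Text

n = Nested(properties={'author': Text()})
print(n.to_dict())
# roughly: {'type': 'nested', 'properties': {'author': {'type': 'text'}}}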
Example #3
Source File: test_mapping.py From elasticsearch-dsl-py with Apache License 2.0
def test_resolve_nested():
    m = mapping.Mapping()
    m.field('n1', 'nested', properties={'n2': Nested(properties={'k1': Keyword()})})
    m.field('k2', 'keyword')

    nested, field = m.resolve_nested('n1.n2.k1')
    assert nested == ['n1', 'n1.n2']
    assert isinstance(field, Keyword)

    nested, field = m.resolve_nested('k2')
    assert nested == []
    assert isinstance(field, Keyword)
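resolve_nested() returns the chain of nested paths above a field, which is exactly what is needed to wrap a query on that field in nested queries. A hedged sketch of that wrapping for 'n1.n2.k1' (the index name and value are made up):

from elasticsearch_dsl import Search, Q

s = Search(index='example')
s = s.query('nested', path='n1',
            query=Q('nested', path='n1.n2',
                    query=Q('term', **{'n1.n2.k1': 'some-value'})))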
Example #4
Source File: views.py From django-seeker with BSD 2-Clause "Simplified" License
def get_field_highlight(self, field_name):
    if field_name in self.highlight_fields:
        return self.highlight_fields[field_name]
    if field_name in self.document._doc_type.mapping:
        dsl_field = self.document._doc_type.mapping[field_name]
        if isinstance(dsl_field, (dsl.Object, dsl.Nested)):
            return '%s.*' % field_name
        return field_name
    return None
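The '<field>.*' value returned for Object/Nested fields is a wildcard pattern that highlight requests accept, so all sub-fields of the nested object get highlighted. A small usage sketch (index and field names assumed, not taken from django-seeker):

from elasticsearch_dsl import Search

s = Search(index='example')
s = s.highlight('comments.*')  # covers comments.author, comments.body, ...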
Example #5
Source File: test_mapping.py From elasticsearch-dsl-py with Apache License 2.0
def test_mapping_can_collect_all_analyzers_and_normalizers():
    a1 = analysis.analyzer('my_analyzer1',
        tokenizer='keyword',
        filter=['lowercase', analysis.token_filter('my_filter1', 'stop', stopwords=['a', 'b'])],
    )
    a2 = analysis.analyzer('english')
    a3 = analysis.analyzer('unknown_custom')
    a4 = analysis.analyzer('my_analyzer2',
        tokenizer=analysis.tokenizer('trigram', 'nGram', min_gram=3, max_gram=3),
        filter=[analysis.token_filter('my_filter2', 'stop', stopwords=['c', 'd'])],
    )
    a5 = analysis.analyzer('my_analyzer3', tokenizer='keyword')
    n1 = analysis.normalizer('my_normalizer1', filter=['lowercase'])
    n2 = analysis.normalizer('my_normalizer2',
        filter=['my_filter1', 'my_filter2',
                analysis.token_filter('my_filter3', 'stop', stopwords=['e', 'f'])]
    )
    n3 = analysis.normalizer('unknown_custom')

    m = mapping.Mapping()
    m.field('title', 'text', analyzer=a1,
        fields={
            'english': Text(analyzer=a2),
            'unknown': Keyword(search_analyzer=a3),
        }
    )
    m.field('comments', Nested(properties={'author': Text(analyzer=a4)}))
    m.field('normalized_title', 'keyword', normalizer=n1)
    m.field('normalized_comment', 'keyword', normalizer=n2)
    m.field('unknown', 'keyword', normalizer=n3)
    m.meta('_all', analyzer=a5)

    assert {
        'analyzer': {
            'my_analyzer1': {'filter': ['lowercase', 'my_filter1'], 'tokenizer': 'keyword', 'type': 'custom'},
            'my_analyzer2': {'filter': ['my_filter2'], 'tokenizer': 'trigram', 'type': 'custom'},
            'my_analyzer3': {'tokenizer': 'keyword', 'type': 'custom'},
        },
        'normalizer': {
            'my_normalizer1': {'filter': ['lowercase'], 'type': 'custom'},
            'my_normalizer2': {'filter': ['my_filter1', 'my_filter2', 'my_filter3'], 'type': 'custom'},
        },
        'filter': {
            'my_filter1': {'stopwords': ['a', 'b'], 'type': 'stop'},
            'my_filter2': {'stopwords': ['c', 'd'], 'type': 'stop'},
            'my_filter3': {'stopwords': ['e', 'f'], 'type': 'stop'},
        },
        'tokenizer': {
            'trigram': {'max_gram': 3, 'min_gram': 3, 'type': 'nGram'},
        }
    } == m._collect_analysis()

    assert json.loads(json.dumps(m.to_dict())) == m.to_dict()
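The analysis collected by _collect_analysis() is what ends up in the index settings when a mapping like the one built above is written out. A hedged sketch (assumes a reachable Elasticsearch cluster and an index name of your choosing):

from elasticsearch_dsl import connections

connections.create_connection(hosts=['localhost'])
m.save('example-index')  # pushes the mapping along with its collected analysis settings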