Python django.db.models.sql.compiler Examples
The following are 24 code examples of the module django.db.models.sql.compiler, collected from open source projects. The originating project, source file, and license are listed above each example. You may also want to check out the other available functions and classes of the parent module, django.db.models.sql.
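Most of the examples below reach the compiler indirectly through QuerySet.query.get_compiler(). As a quick orientation, here is a minimal sketch, not taken from any of the listed projects, showing how a compiler can be obtained from a queryset and asked for the SQL it would run; the Author model and app name are hypothetical.

    from myapp.models import Author  # hypothetical model, for illustration only

    qs = Author.objects.filter(name__startswith="A")
    compiler = qs.query.get_compiler(using="default")  # pick a database alias
    sql, params = compiler.as_sql()                    # SQL string and its parameters
    print(sql, params)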
Example #1
Source File: query.py From python2017 with MIT License | 6 votes |
def __iter__(self):
    """
    The queryset iterator protocol uses three nested iterators in the
    default case:
        1. sql.compiler:execute_sql()
           - Returns 100 rows at time (constants.GET_ITERATOR_CHUNK_SIZE)
             using cursor.fetchmany(). This part is responsible for
             doing some column masking, and returning the rows in chunks.
        2. sql/compiler.results_iter()
           - Returns one row at time. At this point the rows are still just
             tuples. In some cases the return values are converted to
             Python values at this location.
        3. self.iterator()
           - Responsible for turning the rows into model objects.
    """
    self._fetch_all()
    return iter(self._result_cache)
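The docstring above describes the default iteration pipeline; the method body itself only fills the result cache and iterates it. A small hedged illustration of what that means for callers, assuming a hypothetical Author model: iterating the same queryset twice runs the compiler pipeline only once.

    qs = Author.objects.all()  # Author is hypothetical
    first = list(qs)           # executes SQL via execute_sql()/results_iter()
    second = list(qs)          # served from qs._result_cache, no second query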
Example #2
Source File: query.py From python2017 with MIT License | 6 votes |
def __iter__(self):
    queryset = self.queryset
    query = queryset.query
    compiler = query.get_compiler(queryset.db)
    if not query.extra_select and not query.annotation_select:
        for row in compiler.results_iter():
            yield tuple(row)
    else:
        field_names = list(query.values_select)
        extra_names = list(query.extra_select)
        annotation_names = list(query.annotation_select)

        # extra(select=...) cols are always at the start of the row.
        names = extra_names + field_names + annotation_names

        if queryset._fields:
            # Reorder according to fields.
            fields = list(queryset._fields) + [f for f in annotation_names if f not in queryset._fields]
        else:
            fields = names

        for row in compiler.results_iter():
            data = dict(zip(names, row))
            yield tuple(data[f] for f in fields)
Example #3
Source File: query.py From bioforum with MIT License | 6 votes |
def __iter__(self):
    queryset = self.queryset
    query = queryset.query
    compiler = query.get_compiler(queryset.db)

    if queryset._fields:
        field_names = list(query.values_select)
        extra_names = list(query.extra_select)
        annotation_names = list(query.annotation_select)

        # extra(select=...) cols are always at the start of the row.
        names = extra_names + field_names + annotation_names

        fields = list(queryset._fields) + [f for f in annotation_names if f not in queryset._fields]
        if fields != names:
            # Reorder according to fields.
            index_map = {name: idx for idx, name in enumerate(names)}
            rowfactory = operator.itemgetter(*[index_map[f] for f in fields])
            return map(
                rowfactory,
                compiler.results_iter(chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size)
            )
    return compiler.results_iter(tuple_expected=True, chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size)
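This appears to be the ValuesListIterable behind QuerySet.values_list(): the index_map/itemgetter branch reorders each row when the requested field order differs from the order the compiler emits (extras first, then selected fields, then annotations). A hedged usage sketch that exercises the reordering branch, assuming hypothetical Author and Book models related by a foreign key:

    from django.db.models import Count

    rows = (Author.objects                      # Author/Book are hypothetical
            .annotate(num_books=Count("book"))
            .values_list("num_books", "name"))  # requested order drives the reorder above
    for num_books, name in rows:
        print(name, num_books)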
Example #4
Source File: query.py From openhgsenti with Apache License 2.0 | 6 votes |
def __iter__(self):
    """
    The queryset iterator protocol uses three nested iterators in the
    default case:
        1. sql.compiler:execute_sql()
           - Returns 100 rows at time (constants.GET_ITERATOR_CHUNK_SIZE)
             using cursor.fetchmany(). This part is responsible for
             doing some column masking, and returning the rows in chunks.
        2. sql/compiler.results_iter()
           - Returns one row at time. At this point the rows are still just
             tuples. In some cases the return values are converted to
             Python values at this location.
        3. self.iterator()
           - Responsible for turning the rows into model objects.
    """
    self._fetch_all()
    return iter(self._result_cache)
Example #5
Source File: query.py From bioforum with MIT License | 6 votes |
def __iter__(self):
    """
    The queryset iterator protocol uses three nested iterators in the
    default case:
        1. sql.compiler:execute_sql()
           - Returns 100 rows at time (constants.GET_ITERATOR_CHUNK_SIZE)
             using cursor.fetchmany(). This part is responsible for
             doing some column masking, and returning the rows in chunks.
        2. sql.compiler.results_iter()
           - Returns one row at time. At this point the rows are still just
             tuples. In some cases the return values are converted to
             Python values at this location.
        3. self.iterator()
           - Responsible for turning the rows into model objects.
    """
    self._fetch_all()
    return iter(self._result_cache)
Example #6
Source File: query.py From openhgsenti with Apache License 2.0 | 6 votes |
def __iter__(self):
    queryset = self.queryset
    query = queryset.query
    compiler = query.get_compiler(queryset.db)
    if not query.extra_select and not query.annotation_select:
        for row in compiler.results_iter():
            yield tuple(row)
    else:
        field_names = list(query.values_select)
        extra_names = list(query.extra_select)
        annotation_names = list(query.annotation_select)

        # extra(select=...) cols are always at the start of the row.
        names = extra_names + field_names + annotation_names

        if queryset._fields:
            # Reorder according to fields.
            fields = list(queryset._fields) + [f for f in annotation_names if f not in queryset._fields]
        else:
            fields = names

        for row in compiler.results_iter():
            data = dict(zip(names, row))
            yield tuple(data[f] for f in fields)
Example #7
Source File: query.py From python with Apache License 2.0 | 6 votes |
def __iter__(self):
    queryset = self.queryset
    query = queryset.query
    compiler = query.get_compiler(queryset.db)
    if not query.extra_select and not query.annotation_select:
        for row in compiler.results_iter():
            yield tuple(row)
    else:
        field_names = list(query.values_select)
        extra_names = list(query.extra_select)
        annotation_names = list(query.annotation_select)

        # extra(select=...) cols are always at the start of the row.
        names = extra_names + field_names + annotation_names

        if queryset._fields:
            # Reorder according to fields.
            fields = list(queryset._fields) + [f for f in annotation_names if f not in queryset._fields]
        else:
            fields = names

        for row in compiler.results_iter():
            data = dict(zip(names, row))
            yield tuple(data[f] for f in fields)
Example #8
Source File: query.py From GTDWeb with GNU General Public License v2.0 | 6 votes |
def __iter__(self):
    """
    The queryset iterator protocol uses three nested iterators in the
    default case:
        1. sql.compiler:execute_sql()
           - Returns 100 rows at time (constants.GET_ITERATOR_CHUNK_SIZE)
             using cursor.fetchmany(). This part is responsible for
             doing some column masking, and returning the rows in chunks.
        2. sql/compiler.results_iter()
           - Returns one row at time. At this point the rows are still just
             tuples. In some cases the return values are converted to
             Python values at this location.
        3. self.iterator()
           - Responsible for turning the rows into model objects.
    """
    self._fetch_all()
    return iter(self._result_cache)
Example #9
Source File: query.py From python with Apache License 2.0 | 6 votes |
def __iter__(self):
    """
    The queryset iterator protocol uses three nested iterators in the
    default case:
        1. sql.compiler:execute_sql()
           - Returns 100 rows at time (constants.GET_ITERATOR_CHUNK_SIZE)
             using cursor.fetchmany(). This part is responsible for
             doing some column masking, and returning the rows in chunks.
        2. sql/compiler.results_iter()
           - Returns one row at time. At this point the rows are still just
             tuples. In some cases the return values are converted to
             Python values at this location.
        3. self.iterator()
           - Responsible for turning the rows into model objects.
    """
    self._fetch_all()
    return iter(self._result_cache)
Example #10
Source File: query.py From python with Apache License 2.0 | 5 votes |
def __iter__(self):
    queryset = self.queryset
    compiler = queryset.query.get_compiler(queryset.db)
    for row in compiler.results_iter():
        yield row[0]
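This variant yields only the first column of each row, which is what QuerySet.values_list(..., flat=True) relies on. A minimal usage sketch with a hypothetical Author model:

    names = Author.objects.values_list("name", flat=True)
    for name in names:  # each item is a plain value, not a 1-tuple
        print(name)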
Example #11
Source File: query.py From python2017 with MIT License | 5 votes |
def __iter__(self):
    queryset = self.queryset
    compiler = queryset.query.get_compiler(queryset.db)
    for row in compiler.results_iter():
        yield row[0]
Example #12
Source File: query.py From python2017 with MIT License | 5 votes |
def __iter__(self):
    queryset = self.queryset
    query = queryset.query
    compiler = query.get_compiler(queryset.db)

    field_names = list(query.values_select)
    extra_names = list(query.extra_select)
    annotation_names = list(query.annotation_select)

    # extra(select=...) cols are always at the start of the row.
    names = extra_names + field_names + annotation_names

    for row in compiler.results_iter():
        yield dict(zip(names, row))
Example #13
Source File: query.py From python2017 with MIT License | 5 votes |
def __iter__(self):
    queryset = self.queryset
    db = queryset.db
    compiler = queryset.query.get_compiler(using=db)
    # Execute the query. This will also fill compiler.select, klass_info,
    # and annotations.
    results = compiler.execute_sql(chunked_fetch=self.chunked_fetch)
    select, klass_info, annotation_col_map = (compiler.select, compiler.klass_info,
                                              compiler.annotation_col_map)
    model_cls = klass_info['model']
    select_fields = klass_info['select_fields']
    model_fields_start, model_fields_end = select_fields[0], select_fields[-1] + 1
    init_list = [f[0].target.attname
                 for f in select[model_fields_start:model_fields_end]]
    related_populators = get_related_populators(klass_info, select, db)
    for row in compiler.results_iter(results):
        obj = model_cls.from_db(db, init_list, row[model_fields_start:model_fields_end])
        if related_populators:
            for rel_populator in related_populators:
                rel_populator.populate(row, obj)
        if annotation_col_map:
            for attr_name, col_pos in annotation_col_map.items():
                setattr(obj, attr_name, row[col_pos])

        # Add the known related objects to the model, if there are any
        if queryset._known_related_objects:
            for field, rel_objs in queryset._known_related_objects.items():
                # Avoid overwriting objects loaded e.g. by select_related
                if hasattr(obj, field.get_cache_name()):
                    continue
                pk = getattr(obj, field.get_attname())
                try:
                    rel_obj = rel_objs[pk]
                except KeyError:
                    pass  # may happen in qs1 | qs2 scenarios
                else:
                    setattr(obj, field.name, rel_obj)

        yield obj
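Example #13 is the model-instance path: execute_sql() fills compiler.select, klass_info and annotation_col_map, annotation columns are attached to each instance with setattr(), and known related objects are reattached unless select_related() has already cached them. A hedged sketch of what this produces for calling code, with hypothetical models:

    from django.db.models import Count

    for author in (Author.objects                      # Author/Book are hypothetical
                   .select_related()                   # handled by the related populators
                   .annotate(num_books=Count("book"))):
        print(author.name, author.num_books)           # num_books set via annotation_col_map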
Example #14
Source File: query.py From openhgsenti with Apache License 2.0 | 5 votes |
def __iter__(self):
    queryset = self.queryset
    compiler = queryset.query.get_compiler(queryset.db)
    for row in compiler.results_iter():
        yield row[0]
Example #15
Source File: query.py From openhgsenti with Apache License 2.0 | 5 votes |
def __iter__(self):
    queryset = self.queryset
    query = queryset.query
    compiler = query.get_compiler(queryset.db)

    field_names = list(query.values_select)
    extra_names = list(query.extra_select)
    annotation_names = list(query.annotation_select)

    # extra(select=...) cols are always at the start of the row.
    names = extra_names + field_names + annotation_names

    for row in compiler.results_iter():
        yield dict(zip(names, row))
Example #16
Source File: query.py From python with Apache License 2.0 | 5 votes |
def __iter__(self):
    queryset = self.queryset
    query = queryset.query
    compiler = query.get_compiler(queryset.db)

    field_names = list(query.values_select)
    extra_names = list(query.extra_select)
    annotation_names = list(query.annotation_select)

    # extra(select=...) cols are always at the start of the row.
    names = extra_names + field_names + annotation_names

    for row in compiler.results_iter():
        yield dict(zip(names, row))
Example #17
Source File: query.py From python with Apache License 2.0 | 5 votes |
def __iter__(self):
    queryset = self.queryset
    db = queryset.db
    compiler = queryset.query.get_compiler(using=db)
    # Execute the query. This will also fill compiler.select, klass_info,
    # and annotations.
    results = compiler.execute_sql(chunked_fetch=self.chunked_fetch)
    select, klass_info, annotation_col_map = (compiler.select, compiler.klass_info,
                                              compiler.annotation_col_map)
    model_cls = klass_info['model']
    select_fields = klass_info['select_fields']
    model_fields_start, model_fields_end = select_fields[0], select_fields[-1] + 1
    init_list = [f[0].target.attname
                 for f in select[model_fields_start:model_fields_end]]
    related_populators = get_related_populators(klass_info, select, db)
    for row in compiler.results_iter(results):
        obj = model_cls.from_db(db, init_list, row[model_fields_start:model_fields_end])
        if related_populators:
            for rel_populator in related_populators:
                rel_populator.populate(row, obj)
        if annotation_col_map:
            for attr_name, col_pos in annotation_col_map.items():
                setattr(obj, attr_name, row[col_pos])

        # Add the known related objects to the model, if there are any
        if queryset._known_related_objects:
            for field, rel_objs in queryset._known_related_objects.items():
                # Avoid overwriting objects loaded e.g. by select_related
                if hasattr(obj, field.get_cache_name()):
                    continue
                pk = getattr(obj, field.get_attname())
                try:
                    rel_obj = rel_objs[pk]
                except KeyError:
                    pass  # may happen in qs1 | qs2 scenarios
                else:
                    setattr(obj, field.name, rel_obj)

        yield obj
Example #18
Source File: query.py From bioforum with MIT License | 5 votes |
def __iter__(self):
    queryset = self.queryset
    compiler = queryset.query.get_compiler(queryset.db)
    return chain.from_iterable(compiler.results_iter(chunked_fetch=self.chunked_fetch,
                                                     chunk_size=self.chunk_size))
Example #19
Source File: query.py From bioforum with MIT License | 5 votes |
def __iter__(self):
    queryset = self.queryset
    query = queryset.query
    compiler = query.get_compiler(queryset.db)

    field_names = list(query.values_select)
    extra_names = list(query.extra_select)
    annotation_names = list(query.annotation_select)

    # extra(select=...) cols are always at the start of the row.
    names = extra_names + field_names + annotation_names
    indexes = range(len(names))
    for row in compiler.results_iter(chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size):
        yield {names[i]: row[i] for i in indexes}
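Example #19 backs QuerySet.values(): each row is turned into a dict keyed by the combined extra/field/annotation names. A brief usage sketch with a hypothetical Author model:

    for row in Author.objects.values("id", "name"):
        print(row)  # e.g. {'id': 1, 'name': '...'}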
Example #20
Source File: query.py From bioforum with MIT License | 5 votes |
def __iter__(self):
    queryset = self.queryset
    db = queryset.db
    compiler = queryset.query.get_compiler(using=db)
    # Execute the query. This will also fill compiler.select, klass_info,
    # and annotations.
    results = compiler.execute_sql(chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size)
    select, klass_info, annotation_col_map = (compiler.select, compiler.klass_info,
                                              compiler.annotation_col_map)
    model_cls = klass_info['model']
    select_fields = klass_info['select_fields']
    model_fields_start, model_fields_end = select_fields[0], select_fields[-1] + 1
    init_list = [f[0].target.attname
                 for f in select[model_fields_start:model_fields_end]]
    related_populators = get_related_populators(klass_info, select, db)
    for row in compiler.results_iter(results):
        obj = model_cls.from_db(db, init_list, row[model_fields_start:model_fields_end])
        if related_populators:
            for rel_populator in related_populators:
                rel_populator.populate(row, obj)
        if annotation_col_map:
            for attr_name, col_pos in annotation_col_map.items():
                setattr(obj, attr_name, row[col_pos])

        # Add the known related objects to the model, if there are any
        if queryset._known_related_objects:
            for field, rel_objs in queryset._known_related_objects.items():
                # Avoid overwriting objects loaded e.g. by select_related
                if field.is_cached(obj):
                    continue
                pk = getattr(obj, field.get_attname())
                try:
                    rel_obj = rel_objs[pk]
                except KeyError:
                    pass  # may happen in qs1 | qs2 scenarios
                else:
                    setattr(obj, field.name, rel_obj)

        yield obj
Example #21
Source File: query.py From GTDWeb with GNU General Public License v2.0 | 5 votes |
def __iter__(self):
    # Cache some things for performance reasons outside the loop.
    db = self.db
    compiler = connections[db].ops.compiler('SQLCompiler')(
        self.query, connections[db], db
    )

    query = iter(self.query)

    try:
        model_init_names, model_init_pos, annotation_fields = self.resolve_model_init_order()

        # Find out which model's fields are not present in the query.
        skip = set()
        for field in self.model._meta.fields:
            if field.attname not in model_init_names:
                skip.add(field.attname)
        if skip:
            if self.model._meta.pk.attname in skip:
                raise InvalidQuery('Raw query must include the primary key')
            model_cls = deferred_class_factory(self.model, skip)
        else:
            model_cls = self.model
        fields = [self.model_fields.get(c, None) for c in self.columns]
        converters = compiler.get_converters([
            f.get_col(f.model._meta.db_table) if f else None for f in fields
        ])
        for values in query:
            if converters:
                values = compiler.apply_converters(values, converters)
            # Associate fields to values
            model_init_values = [values[pos] for pos in model_init_pos]
            instance = model_cls.from_db(db, model_init_names, model_init_values)
            if annotation_fields:
                for column, pos in annotation_fields:
                    setattr(instance, column, values[pos])
            yield instance
    finally:
        # Done iterating the Query. If it has its own cursor, close it.
        if hasattr(self.query, 'cursor') and self.query.cursor:
            self.query.cursor.close()
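Example #21 is the raw-SQL path: the compiler is built directly via connections[db].ops.compiler('SQLCompiler') and is used only for value conversion, while the SQL itself comes from the RawQuery. A hedged usage sketch, assuming a hypothetical Author model whose table is myapp_author; note that the raw query must include the primary key, as the code above enforces:

    for author in Author.objects.raw("SELECT id, name FROM myapp_author"):
        print(author.name)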
Example #22
Source File: query.py From GTDWeb with GNU General Public License v2.0 | 5 votes |
def iterator(self):
    compiler = self.query.get_compiler(self.db)
    if self.flat and len(self._fields) == 1:
        for row in compiler.results_iter():
            yield row[0]
    elif not self.query.extra_select and not self.query.annotation_select:
        for row in compiler.results_iter():
            yield tuple(row)
    else:
        # When extra(select=...) or an annotation is involved, the extra
        # cols are always at the start of the row, and we need to reorder
        # the fields to match the order in self._fields.
        extra_names = list(self.query.extra_select)
        field_names = self.field_names
        annotation_names = list(self.query.annotation_select)

        names = extra_names + field_names + annotation_names

        # If a field list has been specified, use it. Otherwise, use the
        # full list of fields, including extras and annotations.
        if self._fields:
            fields = list(self._fields) + [f for f in annotation_names if f not in self._fields]
        else:
            fields = names

        for row in compiler.results_iter():
            data = dict(zip(names, row))
            yield tuple(data[f] for f in fields)
Example #23
Source File: query.py From openhgsenti with Apache License 2.0 | 4 votes |
def __iter__(self):
    queryset = self.queryset
    db = queryset.db
    compiler = queryset.query.get_compiler(using=db)
    # Execute the query. This will also fill compiler.select, klass_info,
    # and annotations.
    results = compiler.execute_sql()
    select, klass_info, annotation_col_map = (compiler.select, compiler.klass_info,
                                              compiler.annotation_col_map)
    if klass_info is None:
        return
    model_cls = klass_info['model']
    select_fields = klass_info['select_fields']
    model_fields_start, model_fields_end = select_fields[0], select_fields[-1] + 1
    init_list = [f[0].target.attname
                 for f in select[model_fields_start:model_fields_end]]
    if len(init_list) != len(model_cls._meta.concrete_fields):
        init_set = set(init_list)
        skip = [f.attname for f in model_cls._meta.concrete_fields
                if f.attname not in init_set]
        model_cls = deferred_class_factory(model_cls, skip)
    related_populators = get_related_populators(klass_info, select, db)
    for row in compiler.results_iter(results):
        obj = model_cls.from_db(db, init_list, row[model_fields_start:model_fields_end])
        if related_populators:
            for rel_populator in related_populators:
                rel_populator.populate(row, obj)
        if annotation_col_map:
            for attr_name, col_pos in annotation_col_map.items():
                setattr(obj, attr_name, row[col_pos])

        # Add the known related objects to the model, if there are any
        if queryset._known_related_objects:
            for field, rel_objs in queryset._known_related_objects.items():
                # Avoid overwriting objects loaded e.g. by select_related
                if hasattr(obj, field.get_cache_name()):
                    continue
                pk = getattr(obj, field.get_attname())
                try:
                    rel_obj = rel_objs[pk]
                except KeyError:
                    pass  # may happen in qs1 | qs2 scenarios
                else:
                    setattr(obj, field.name, rel_obj)

        yield obj
Example #24
Source File: query.py From GTDWeb with GNU General Public License v2.0 | 4 votes |
def iterator(self):
    """
    An iterator over the results from applying this QuerySet to the
    database.
    """
    db = self.db
    compiler = self.query.get_compiler(using=db)
    # Execute the query. This will also fill compiler.select, klass_info,
    # and annotations.
    results = compiler.execute_sql()
    select, klass_info, annotation_col_map = (compiler.select, compiler.klass_info,
                                              compiler.annotation_col_map)
    if klass_info is None:
        return
    model_cls = klass_info['model']
    select_fields = klass_info['select_fields']
    model_fields_start, model_fields_end = select_fields[0], select_fields[-1] + 1
    init_list = [f[0].target.attname
                 for f in select[model_fields_start:model_fields_end]]
    if len(init_list) != len(model_cls._meta.concrete_fields):
        init_set = set(init_list)
        skip = [f.attname for f in model_cls._meta.concrete_fields
                if f.attname not in init_set]
        model_cls = deferred_class_factory(model_cls, skip)
    related_populators = get_related_populators(klass_info, select, db)
    for row in compiler.results_iter(results):
        obj = model_cls.from_db(db, init_list, row[model_fields_start:model_fields_end])
        if related_populators:
            for rel_populator in related_populators:
                rel_populator.populate(row, obj)
        if annotation_col_map:
            for attr_name, col_pos in annotation_col_map.items():
                setattr(obj, attr_name, row[col_pos])

        # Add the known related objects to the model, if there are any
        if self._known_related_objects:
            for field, rel_objs in self._known_related_objects.items():
                # Avoid overwriting objects loaded e.g. by select_related
                if hasattr(obj, field.get_cache_name()):
                    continue
                pk = getattr(obj, field.get_attname())
                try:
                    rel_obj = rel_objs[pk]
                except KeyError:
                    pass  # may happen in qs1 | qs2 scenarios
                else:
                    setattr(obj, field.name, rel_obj)

        yield obj
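Example #24 is the full iterator() of an older QuerySet (note deferred_class_factory, which newer Django versions no longer use). Calling iterator() streams rows without filling the queryset's result cache, in contrast with the __iter__() implementations in Examples #1 and #5. A minimal hedged sketch with a hypothetical Author model:

    for author in Author.objects.all().iterator():
        print(author.name)  # rows are fetched in chunks and not cached on the queryset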