| """ |
| The main QuerySet implementation. This provides the public API for the ORM. |
| """ |
|
|
| import copy |
| import operator |
| import warnings |
| from itertools import chain, islice |
|
|
| from asgiref.sync import sync_to_async |
|
|
| import django |
| from django.conf import settings |
| from django.core import exceptions |
| from django.db import ( |
| DJANGO_VERSION_PICKLE_KEY, |
| IntegrityError, |
| NotSupportedError, |
| connections, |
| router, |
| transaction, |
| ) |
| from django.db.models import AutoField, DateField, DateTimeField, Field, sql |
| from django.db.models.constants import LOOKUP_SEP, OnConflict |
| from django.db.models.deletion import Collector |
| from django.db.models.expressions import Case, F, Value, When |
| from django.db.models.functions import Cast, Trunc |
| from django.db.models.query_utils import FilteredRelation, Q |
| from django.db.models.sql.constants import CURSOR, GET_ITERATOR_CHUNK_SIZE |
| from django.db.models.utils import ( |
| AltersData, |
| create_namedtuple_class, |
| resolve_callables, |
| ) |
| from django.utils import timezone |
| from django.utils.functional import cached_property, partition |
|
|
| # The maximum number of results to fetch in a get() query. |
| MAX_GET_RESULTS = 21 |
|
|
| # The maximum number of items to display in a QuerySet.__repr__(). |
| REPR_OUTPUT_SIZE = 20 |
|
|
|
|
| class BaseIterable: |
| def __init__( |
| self, queryset, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE |
| ): |
| self.queryset = queryset |
| self.chunked_fetch = chunked_fetch |
| self.chunk_size = chunk_size |
|
|
| async def _async_generator(self): |
| # Generators don't actually start running until the first time you call |
| # next() on them, so make the generator object in the async thread and |
| # then repeatedly dispatch to it in a sync thread. |
| sync_generator = self.__iter__() |
|
|
| def next_slice(gen): |
| return list(islice(gen, self.chunk_size)) |
|
|
| while True: |
| chunk = await sync_to_async(next_slice)(sync_generator) |
| for item in chunk: |
| yield item |
| if len(chunk) < self.chunk_size: |
| break |
|
|
| # __aiter__() is a *synchronous* method that has to then return an |
| # *asynchronous* iterator/generator. Thus, nest an asynchronous |
| # generator inside it. |
| def __aiter__(self): |
| return self._async_generator() |
|
|
|
|
| class ModelIterable(BaseIterable): |
| """Iterable that yields a model instance for each row.""" |
|
|
| def __iter__(self): |
| queryset = self.queryset |
| db = queryset.db |
| compiler = queryset.query.get_compiler(using=db) |
| # Execute the query. This fills compiler.select, compiler.klass_info, |
| # and compiler.annotation_col_map. |
| results = compiler.execute_sql( |
| chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size |
| ) |
| select, klass_info, annotation_col_map = ( |
| compiler.select, |
| compiler.klass_info, |
| compiler.annotation_col_map, |
| ) |
| model_cls = klass_info["model"] |
| select_fields = klass_info["select_fields"] |
| model_fields_start, model_fields_end = select_fields[0], select_fields[-1] + 1 |
| init_list = [ |
| f[0].target.attname for f in select[model_fields_start:model_fields_end] |
| ] |
| related_populators = get_related_populators(klass_info, select, db) |
| known_related_objects = [ |
| ( |
| field, |
| related_objs, |
| operator.attrgetter( |
| *[ |
| field.attname |
| if from_field == "self" |
| else queryset.model._meta.get_field(from_field).attname |
| for from_field in field.from_fields |
| ] |
| ), |
| ) |
| for field, related_objs in queryset._known_related_objects.items() |
| ] |
| for row in compiler.results_iter(results): |
| obj = model_cls.from_db( |
| db, init_list, row[model_fields_start:model_fields_end] |
| ) |
| for rel_populator in related_populators: |
| rel_populator.populate(row, obj) |
| if annotation_col_map: |
| for attr_name, col_pos in annotation_col_map.items(): |
| setattr(obj, attr_name, row[col_pos]) |
|
|
| # Add the known related objects to the model. |
| for field, rel_objs, rel_getter in known_related_objects: |
| # Avoid overwriting objects loaded by, e.g., select_related(). |
| if field.is_cached(obj): |
| continue |
| rel_obj_id = rel_getter(obj) |
| try: |
| rel_obj = rel_objs[rel_obj_id] |
| except KeyError: |
| pass |
| else: |
| setattr(obj, field.name, rel_obj) |
|
|
| yield obj |
|
|
|
|
| class RawModelIterable(BaseIterable): |
| """ |
| Iterable that yields a model instance for each row from a raw queryset. |
| """ |
|
|
| def __iter__(self): |
| # Cache some things for performance reasons outside the loop. |
| db = self.queryset.db |
| query = self.queryset.query |
| connection = connections[db] |
| compiler = connection.ops.compiler("SQLCompiler")(query, connection, db) |
| query_iterator = iter(query) |
|
|
| try: |
| ( |
| model_init_names, |
| model_init_pos, |
| annotation_fields, |
| ) = self.queryset.resolve_model_init_order() |
| model_cls = self.queryset.model |
| if model_cls._meta.pk.attname not in model_init_names: |
| raise exceptions.FieldDoesNotExist( |
| "Raw query must include the primary key" |
| ) |
| fields = [self.queryset.model_fields.get(c) for c in self.queryset.columns] |
| converters = compiler.get_converters( |
| [f.get_col(f.model._meta.db_table) if f else None for f in fields] |
| ) |
| if converters: |
| query_iterator = compiler.apply_converters(query_iterator, converters) |
| for values in query_iterator: |
| # Associate fields to values. |
| model_init_values = [values[pos] for pos in model_init_pos] |
| instance = model_cls.from_db(db, model_init_names, model_init_values) |
| if annotation_fields: |
| for column, pos in annotation_fields: |
| setattr(instance, column, values[pos]) |
| yield instance |
| finally: |
| # Done iterating the Query. If it has its own cursor, close it. |
| if hasattr(query, "cursor") and query.cursor: |
| query.cursor.close() |
|
|
|
|
| class ValuesIterable(BaseIterable): |
| """ |
| Iterable returned by QuerySet.values() that yields a dict for each row. |
| """ |
|
|
| def __iter__(self): |
| queryset = self.queryset |
| query = queryset.query |
| compiler = query.get_compiler(queryset.db) |
|
|
| # extra(select=...) cols are always at the start of the row. |
| names = [ |
| *query.extra_select, |
| *query.values_select, |
| *query.annotation_select, |
| ] |
| indexes = range(len(names)) |
| for row in compiler.results_iter( |
| chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size |
| ): |
| yield {names[i]: row[i] for i in indexes} |
|
|
|
|
| class ValuesListIterable(BaseIterable): |
| """ |
| Iterable returned by QuerySet.values_list(flat=False) that yields a tuple |
| for each row. |
| """ |
|
|
| def __iter__(self): |
| queryset = self.queryset |
| query = queryset.query |
| compiler = query.get_compiler(queryset.db) |
|
|
| if queryset._fields: |
| # extra(select=...) cols are always at the start of the row. |
| names = [ |
| *query.extra_select, |
| *query.values_select, |
| *query.annotation_select, |
| ] |
| fields = [ |
| *queryset._fields, |
| *(f for f in query.annotation_select if f not in queryset._fields), |
| ] |
| if fields != names: |
| # Reorder according to fields. |
| index_map = {name: idx for idx, name in enumerate(names)} |
| rowfactory = operator.itemgetter(*[index_map[f] for f in fields]) |
| return map( |
| rowfactory, |
| compiler.results_iter( |
| chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size |
| ), |
| ) |
| return compiler.results_iter( |
| tuple_expected=True, |
| chunked_fetch=self.chunked_fetch, |
| chunk_size=self.chunk_size, |
| ) |
|
|
|
|
| class NamedValuesListIterable(ValuesListIterable): |
| """ |
| Iterable returned by QuerySet.values_list(named=True) that yields a |
| namedtuple for each row. |
| """ |
|
|
| def __iter__(self): |
| queryset = self.queryset |
| if queryset._fields: |
| names = queryset._fields |
| else: |
| query = queryset.query |
| names = [ |
| *query.extra_select, |
| *query.values_select, |
| *query.annotation_select, |
| ] |
| tuple_class = create_namedtuple_class(*names) |
| new = tuple.__new__ |
| for row in super().__iter__(): |
| yield new(tuple_class, row) |
|
|
|
|
| class FlatValuesListIterable(BaseIterable): |
| """ |
| Iterable returned by QuerySet.values_list(flat=True) that yields single |
| values. |
| """ |
|
|
| def __iter__(self): |
| queryset = self.queryset |
| compiler = queryset.query.get_compiler(queryset.db) |
| for row in compiler.results_iter( |
| chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size |
| ): |
| yield row[0] |
|
|
|
|
| class QuerySet(AltersData): |
| """Represent a lazy database lookup for a set of objects.""" |
|
|
| def __init__(self, model=None, query=None, using=None, hints=None): |
| self.model = model |
| self._db = using |
| self._hints = hints or {} |
| self._query = query or sql.Query(self.model) |
| self._result_cache = None |
| self._sticky_filter = False |
| self._for_write = False |
| self._prefetch_related_lookups = () |
| self._prefetch_done = False |
| self._known_related_objects = {} |
| self._iterable_class = ModelIterable |
| self._fields = None |
| self._defer_next_filter = False |
| self._deferred_filter = None |
|
|
| @property |
| def query(self): |
| if self._deferred_filter: |
| negate, args, kwargs = self._deferred_filter |
| self._filter_or_exclude_inplace(negate, args, kwargs) |
| self._deferred_filter = None |
| return self._query |
|
|
| @query.setter |
| def query(self, value): |
| if value.values_select: |
| self._iterable_class = ValuesIterable |
| self._query = value |
|
|
| def as_manager(cls): |
| # Address the circular dependency between QuerySet and Manager. |
| from django.db.models.manager import Manager |
|
|
| manager = Manager.from_queryset(cls)() |
| manager._built_with_as_manager = True |
| return manager |
|
|
| as_manager.queryset_only = True |
| as_manager = classmethod(as_manager) |
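|
| # Usage sketch (illustrative only, with a hypothetical PersonQuerySet): |
| # expose custom QuerySet methods through a manager without writing a |
| # Manager subclass: |
| # |
| #     class PersonQuerySet(models.QuerySet): |
| #         def adults(self): |
| #             return self.filter(age__gte=18) |
| # |
| #     class Person(models.Model): |
| #         age = models.IntegerField() |
| #         people = PersonQuerySet.as_manager() |
| # |
| #     Person.people.adults() |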
|
|
| ######################## |
| # PYTHON MAGIC METHODS # |
| ######################## |
|
|
| def __deepcopy__(self, memo): |
| """Don't populate the QuerySet's cache.""" |
| obj = self.__class__() |
| for k, v in self.__dict__.items(): |
| if k == "_result_cache": |
| obj.__dict__[k] = None |
| else: |
| obj.__dict__[k] = copy.deepcopy(v, memo) |
| return obj |
|
|
| def __getstate__(self): |
| # Force the cache to be fully populated. |
| self._fetch_all() |
| return {**self.__dict__, DJANGO_VERSION_PICKLE_KEY: django.__version__} |
|
|
| def __setstate__(self, state): |
| pickled_version = state.get(DJANGO_VERSION_PICKLE_KEY) |
| if pickled_version: |
| if pickled_version != django.__version__: |
| warnings.warn( |
| "Pickled queryset instance's Django version %s does not " |
| "match the current version %s." |
| % (pickled_version, django.__version__), |
| RuntimeWarning, |
| stacklevel=2, |
| ) |
| else: |
| warnings.warn( |
| "Pickled queryset instance's Django version is not specified.", |
| RuntimeWarning, |
| stacklevel=2, |
| ) |
| self.__dict__.update(state) |
|
|
| def __repr__(self): |
| data = list(self[: REPR_OUTPUT_SIZE + 1]) |
| if len(data) > REPR_OUTPUT_SIZE: |
| data[-1] = "...(remaining elements truncated)..." |
| return "<%s %r>" % (self.__class__.__name__, data) |
|
|
| def __len__(self): |
| self._fetch_all() |
| return len(self._result_cache) |
|
|
| def __iter__(self): |
| """ |
| The queryset iterator protocol uses three nested iterators in the |
| default case: |
| 1. sql.compiler.execute_sql() |
| - Returns 100 rows at time (constants.GET_ITERATOR_CHUNK_SIZE) |
| using cursor.fetchmany(). This part is responsible for |
| doing some column masking, and returning the rows in chunks. |
| 2. sql.compiler.results_iter() |
| - Returns one row at time. At this point the rows are still just |
| tuples. In some cases the return values are converted to |
| Python values at this location. |
| 3. self.iterator() |
| - Responsible for turning the rows into model objects. |
| """ |
| self._fetch_all() |
| return iter(self._result_cache) |
|
|
| def __aiter__(self): |
| # Remember, __aiter__ itself is synchronous, it's the thing it returns |
| # that's async! |
| async def generator(): |
| await sync_to_async(self._fetch_all)() |
| for item in self._result_cache: |
| yield item |
|
|
| return generator() |
|
|
| def __bool__(self): |
| self._fetch_all() |
| return bool(self._result_cache) |
|
|
| def __getitem__(self, k): |
| """Retrieve an item or slice from the set of results.""" |
| if not isinstance(k, (int, slice)): |
| raise TypeError( |
| "QuerySet indices must be integers or slices, not %s." |
| % type(k).__name__ |
| ) |
| if (isinstance(k, int) and k < 0) or ( |
| isinstance(k, slice) |
| and ( |
| (k.start is not None and k.start < 0) |
| or (k.stop is not None and k.stop < 0) |
| ) |
| ): |
| raise ValueError("Negative indexing is not supported.") |
|
|
| if self._result_cache is not None: |
| return self._result_cache[k] |
|
|
| if isinstance(k, slice): |
| qs = self._chain() |
| if k.start is not None: |
| start = int(k.start) |
| else: |
| start = None |
| if k.stop is not None: |
| stop = int(k.stop) |
| else: |
| stop = None |
| qs.query.set_limits(start, stop) |
| return list(qs)[:: k.step] if k.step else qs |
|
|
| qs = self._chain() |
| qs.query.set_limits(k, k + 1) |
| qs._fetch_all() |
| return qs._result_cache[0] |
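|
| # Usage sketch (illustrative only, with a hypothetical Entry model): |
| # slicing an unevaluated QuerySet sets LIMIT/OFFSET on the SQL rather |
| # than fetching every row: |
| # |
| #     Entry.objects.all()[:5]     # SELECT ... LIMIT 5 |
| #     Entry.objects.all()[5:10]   # SELECT ... LIMIT 5 OFFSET 5 |
| #     Entry.objects.all()[0]      # SELECT ... LIMIT 1, returns an instance |
| #     Entry.objects.all()[::2]    # a step evaluates the query, returns a list |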
|
|
| def __class_getitem__(cls, *args, **kwargs): |
| return cls |
|
|
| def __and__(self, other): |
| self._check_operator_queryset(other, "&") |
| self._merge_sanity_check(other) |
| if isinstance(other, EmptyQuerySet): |
| return other |
| if isinstance(self, EmptyQuerySet): |
| return self |
| combined = self._chain() |
| combined._merge_known_related_objects(other) |
| combined.query.combine(other.query, sql.AND) |
| return combined |
|
|
| def __or__(self, other): |
| self._check_operator_queryset(other, "|") |
| self._merge_sanity_check(other) |
| if isinstance(self, EmptyQuerySet): |
| return other |
| if isinstance(other, EmptyQuerySet): |
| return self |
| query = ( |
| self |
| if self.query.can_filter() |
| else self.model._base_manager.filter(pk__in=self.values("pk")) |
| ) |
| combined = query._chain() |
| combined._merge_known_related_objects(other) |
| if not other.query.can_filter(): |
| other = other.model._base_manager.filter(pk__in=other.values("pk")) |
| combined.query.combine(other.query, sql.OR) |
| return combined |
|
|
| def __xor__(self, other): |
| self._check_operator_queryset(other, "^") |
| self._merge_sanity_check(other) |
| if isinstance(self, EmptyQuerySet): |
| return other |
| if isinstance(other, EmptyQuerySet): |
| return self |
| query = ( |
| self |
| if self.query.can_filter() |
| else self.model._base_manager.filter(pk__in=self.values("pk")) |
| ) |
| combined = query._chain() |
| combined._merge_known_related_objects(other) |
| if not other.query.can_filter(): |
| other = other.model._base_manager.filter(pk__in=other.values("pk")) |
| combined.query.combine(other.query, sql.XOR) |
| return combined |
|
|
| #################################### |
| # METHODS THAT DO DATABASE QUERIES # |
| #################################### |
|
|
| def _iterator(self, use_chunked_fetch, chunk_size): |
| iterable = self._iterable_class( |
| self, |
| chunked_fetch=use_chunked_fetch, |
| chunk_size=chunk_size or 2000, |
| ) |
| if not self._prefetch_related_lookups or chunk_size is None: |
| yield from iterable |
| return |
|
|
| iterator = iter(iterable) |
| while results := list(islice(iterator, chunk_size)): |
| prefetch_related_objects(results, *self._prefetch_related_lookups) |
| yield from results |
|
|
| def iterator(self, chunk_size=None): |
| """ |
| An iterator over the results from applying this QuerySet to the |
| database. chunk_size must be provided for QuerySets that prefetch |
| related objects. Otherwise, a default chunk_size of 2000 is supplied. |
| """ |
| if chunk_size is None: |
| if self._prefetch_related_lookups: |
| raise ValueError( |
| "chunk_size must be provided when using QuerySet.iterator() after " |
| "prefetch_related()." |
| ) |
| elif chunk_size <= 0: |
| raise ValueError("Chunk size must be strictly positive.") |
| use_chunked_fetch = not connections[self.db].settings_dict.get( |
| "DISABLE_SERVER_SIDE_CURSORS" |
| ) |
| return self._iterator(use_chunked_fetch, chunk_size) |
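|
| # Usage sketch (illustrative only, with a hypothetical Event model): |
| # stream a large result set without populating the result cache; a |
| # chunk_size is mandatory once prefetch_related() is involved: |
| # |
| #     for event in Event.objects.prefetch_related("tags").iterator( |
| #         chunk_size=500 |
| #     ): |
| #         handle(event) |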
|
|
| async def aiterator(self, chunk_size=2000): |
| """ |
| An asynchronous iterator over the results from applying this QuerySet |
| to the database. |
| """ |
| if self._prefetch_related_lookups: |
| raise NotSupportedError( |
| "Using QuerySet.aiterator() after prefetch_related() is not supported." |
| ) |
| if chunk_size <= 0: |
| raise ValueError("Chunk size must be strictly positive.") |
| use_chunked_fetch = not connections[self.db].settings_dict.get( |
| "DISABLE_SERVER_SIDE_CURSORS" |
| ) |
| async for item in self._iterable_class( |
| self, chunked_fetch=use_chunked_fetch, chunk_size=chunk_size |
| ): |
| yield item |
|
|
| def aggregate(self, *args, **kwargs): |
| """ |
| Return a dictionary containing the calculations (aggregation) |
| over the current queryset. |
| |
| If args is present, each expression is passed as a kwarg using |
| the Aggregate object's default alias. |
| """ |
| if self.query.distinct_fields: |
| raise NotImplementedError("aggregate() + distinct(fields) not implemented.") |
| self._validate_values_are_expressions( |
| (*args, *kwargs.values()), method_name="aggregate" |
| ) |
| for arg in args: |
| # The default_alias property raises TypeError if default_alias |
| # can't be set automatically or AttributeError if it isn't an |
| # attribute. |
| try: |
| arg.default_alias |
| except (AttributeError, TypeError): |
| raise TypeError("Complex aggregates require an alias") |
| kwargs[arg.default_alias] = arg |
|
|
| return self.query.chain().get_aggregation(self.db, kwargs) |
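|
| # Usage sketch (illustrative only, with a hypothetical Book model): |
| # positional expressions are keyed by their default alias, keyword |
| # arguments by the name you give them: |
| # |
| #     from django.db.models import Avg, Max |
| #     Book.objects.aggregate(Avg("price"))            # {"price__avg": ...} |
| #     Book.objects.aggregate(top_price=Max("price"))  # {"top_price": ...} |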
|
|
| async def aaggregate(self, *args, **kwargs): |
| return await sync_to_async(self.aggregate)(*args, **kwargs) |
|
|
| def count(self): |
| """ |
| Perform a SELECT COUNT() and return the number of records as an |
| integer. |
| |
| If the QuerySet is already fully cached, return the length of the |
| cached results set to avoid multiple SELECT COUNT(*) calls. |
| """ |
| if self._result_cache is not None: |
| return len(self._result_cache) |
|
|
| return self.query.get_count(using=self.db) |
|
|
| async def acount(self): |
| return await sync_to_async(self.count)() |
|
|
| def get(self, *args, **kwargs): |
| """ |
| Perform the query and return a single object matching the given |
| keyword arguments. |
| """ |
| if self.query.combinator and (args or kwargs): |
| raise NotSupportedError( |
| "Calling QuerySet.get(...) with filters after %s() is not " |
| "supported." % self.query.combinator |
| ) |
| clone = self._chain() if self.query.combinator else self.filter(*args, **kwargs) |
| if self.query.can_filter() and not self.query.distinct_fields: |
| clone = clone.order_by() |
| limit = None |
| if ( |
| not clone.query.select_for_update |
| or connections[clone.db].features.supports_select_for_update_with_limit |
| ): |
| limit = MAX_GET_RESULTS |
| clone.query.set_limits(high=limit) |
| num = len(clone) |
| if num == 1: |
| return clone._result_cache[0] |
| if not num: |
| raise self.model.DoesNotExist( |
| "%s matching query does not exist." % self.model._meta.object_name |
| ) |
| raise self.model.MultipleObjectsReturned( |
| "get() returned more than one %s -- it returned %s!" |
| % ( |
| self.model._meta.object_name, |
| num if not limit or num < limit else "more than %s" % (limit - 1), |
| ) |
| ) |
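|
| # Usage sketch (illustrative only, with a hypothetical Author model): |
| # get() raises rather than returning None or a list, so both failure |
| # modes should be handled by callers: |
| # |
| #     try: |
| #         author = Author.objects.get(name="Ann") |
| #     except Author.DoesNotExist: |
| #         ...  # no row matched |
| #     except Author.MultipleObjectsReturned: |
| #         ...  # more than one row matched |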
|
|
| async def aget(self, *args, **kwargs): |
| return await sync_to_async(self.get)(*args, **kwargs) |
|
|
| def create(self, **kwargs): |
| """ |
| Create a new object with the given kwargs, saving it to the database |
| and returning the created object. |
| """ |
| obj = self.model(**kwargs) |
| self._for_write = True |
| obj.save(force_insert=True, using=self.db) |
| return obj |
|
|
| async def acreate(self, **kwargs): |
| return await sync_to_async(self.create)(**kwargs) |
|
|
| def _prepare_for_bulk_create(self, objs): |
| from django.db.models.expressions import DatabaseDefault |
|
|
| connection = connections[self.db] |
| for obj in objs: |
| if obj.pk is None: |
| # Populate new PK values. |
| obj.pk = obj._meta.pk.get_pk_value_on_save(obj) |
| if not connection.features.supports_default_keyword_in_bulk_insert: |
| for field in obj._meta.fields: |
| value = getattr(obj, field.attname) |
| if isinstance(value, DatabaseDefault): |
| setattr(obj, field.attname, field.db_default) |
|
|
| obj._prepare_related_fields_for_save(operation_name="bulk_create") |
|
|
| def _check_bulk_create_options( |
| self, ignore_conflicts, update_conflicts, update_fields, unique_fields |
| ): |
| if ignore_conflicts and update_conflicts: |
| raise ValueError( |
| "ignore_conflicts and update_conflicts are mutually exclusive." |
| ) |
| db_features = connections[self.db].features |
| if ignore_conflicts: |
| if not db_features.supports_ignore_conflicts: |
| raise NotSupportedError( |
| "This database backend does not support ignoring conflicts." |
| ) |
| return OnConflict.IGNORE |
| elif update_conflicts: |
| if not db_features.supports_update_conflicts: |
| raise NotSupportedError( |
| "This database backend does not support updating conflicts." |
| ) |
| if not update_fields: |
| raise ValueError( |
| "Fields that will be updated when a row insertion fails " |
| "on conflicts must be provided." |
| ) |
| if unique_fields and not db_features.supports_update_conflicts_with_target: |
| raise NotSupportedError( |
| "This database backend does not support updating " |
| "conflicts with specifying unique fields that can trigger " |
| "the upsert." |
| ) |
| if not unique_fields and db_features.supports_update_conflicts_with_target: |
| raise ValueError( |
| "Unique fields that can trigger the upsert must be provided." |
| ) |
| # Updating primary keys and non-concrete fields is forbidden. |
| if any(not f.concrete or f.many_to_many for f in update_fields): |
| raise ValueError( |
| "bulk_create() can only be used with concrete fields in " |
| "update_fields." |
| ) |
| if any(f.primary_key for f in update_fields): |
| raise ValueError( |
| "bulk_create() cannot be used with primary keys in " |
| "update_fields." |
| ) |
| if unique_fields: |
| if any(not f.concrete or f.many_to_many for f in unique_fields): |
| raise ValueError( |
| "bulk_create() can only be used with concrete fields " |
| "in unique_fields." |
| ) |
| return OnConflict.UPDATE |
| return None |
|
|
| def bulk_create( |
| self, |
| objs, |
| batch_size=None, |
| ignore_conflicts=False, |
| update_conflicts=False, |
| update_fields=None, |
| unique_fields=None, |
| ): |
| """ |
| Insert each of the instances into the database. Do *not* call |
| save() on each of the instances, do not send any pre/post_save |
| signals, and do not set the primary key attribute if it is an |
| autoincrement field (except if features.can_return_rows_from_bulk_insert=True). |
| Multi-table models are not supported. |
| """ |
| # When you bulk insert you don't get the primary keys back (if it's an |
| # autoincrement, except if can_return_rows_from_bulk_insert=True), so |
| # you can't insert into the child tables which references this. There |
| # are two workarounds: |
| # 1) This could be implemented if you didn't have an autoincrement pk. |
| # 2) You could do it by doing O(n) normal inserts into the parent |
| # tables to get the primary keys back and then doing a single bulk |
| # insert into the childmost table. |
| # We currently set the primary keys on the objects when using |
| # PostgreSQL via the RETURNING ID clause. It should be possible for |
| # Oracle as well, but the semantics for extracting the primary keys is |
| # trickier so it's not done yet. |
| if batch_size is not None and batch_size <= 0: |
| raise ValueError("Batch size must be a positive integer.") |
| # Check that the parents share the same concrete model with our |
| # model to detect the inheritance pattern ConcreteGrandParent -> |
| # MultiTableParent -> ProxyChild. Simply checking self.model._meta.proxy |
| # would not identify that case as involving multiple tables. |
| for parent in self.model._meta.get_parent_list(): |
| if parent._meta.concrete_model is not self.model._meta.concrete_model: |
| raise ValueError("Can't bulk create a multi-table inherited model") |
| if not objs: |
| return objs |
| opts = self.model._meta |
| if unique_fields: |
| # Primary key is allowed in unique_fields. |
| unique_fields = [ |
| self.model._meta.get_field(opts.pk.name if name == "pk" else name) |
| for name in unique_fields |
| ] |
| if update_fields: |
| update_fields = [self.model._meta.get_field(name) for name in update_fields] |
| on_conflict = self._check_bulk_create_options( |
| ignore_conflicts, |
| update_conflicts, |
| update_fields, |
| unique_fields, |
| ) |
| self._for_write = True |
| fields = opts.concrete_fields |
| objs = list(objs) |
| self._prepare_for_bulk_create(objs) |
| with transaction.atomic(using=self.db, savepoint=False): |
| objs_with_pk, objs_without_pk = partition(lambda o: o.pk is None, objs) |
| if objs_with_pk: |
| returned_columns = self._batched_insert( |
| objs_with_pk, |
| fields, |
| batch_size, |
| on_conflict=on_conflict, |
| update_fields=update_fields, |
| unique_fields=unique_fields, |
| ) |
| for obj_with_pk, results in zip(objs_with_pk, returned_columns): |
| for result, field in zip(results, opts.db_returning_fields): |
| if field != opts.pk: |
| setattr(obj_with_pk, field.attname, result) |
| for obj_with_pk in objs_with_pk: |
| obj_with_pk._state.adding = False |
| obj_with_pk._state.db = self.db |
| if objs_without_pk: |
| fields = [f for f in fields if not isinstance(f, AutoField)] |
| returned_columns = self._batched_insert( |
| objs_without_pk, |
| fields, |
| batch_size, |
| on_conflict=on_conflict, |
| update_fields=update_fields, |
| unique_fields=unique_fields, |
| ) |
| connection = connections[self.db] |
| if ( |
| connection.features.can_return_rows_from_bulk_insert |
| and on_conflict is None |
| ): |
| assert len(returned_columns) == len(objs_without_pk) |
| for obj_without_pk, results in zip(objs_without_pk, returned_columns): |
| for result, field in zip(results, opts.db_returning_fields): |
| setattr(obj_without_pk, field.attname, result) |
| obj_without_pk._state.adding = False |
| obj_without_pk._state.db = self.db |
|
|
| return objs |
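|
| # Usage sketch (illustrative only, with a hypothetical Price model that |
| # has a unique "sku" field): a single-query upsert on backends that |
| # support update_conflicts: |
| # |
| #     Price.objects.bulk_create( |
| #         [Price(sku="a1", amount=10), Price(sku="b2", amount=12)], |
| #         update_conflicts=True, |
| #         unique_fields=["sku"], |
| #         update_fields=["amount"], |
| #     ) |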
|
|
| async def abulk_create( |
| self, |
| objs, |
| batch_size=None, |
| ignore_conflicts=False, |
| update_conflicts=False, |
| update_fields=None, |
| unique_fields=None, |
| ): |
| return await sync_to_async(self.bulk_create)( |
| objs=objs, |
| batch_size=batch_size, |
| ignore_conflicts=ignore_conflicts, |
| update_conflicts=update_conflicts, |
| update_fields=update_fields, |
| unique_fields=unique_fields, |
| ) |
|
|
| def bulk_update(self, objs, fields, batch_size=None): |
| """ |
| Update the given fields in each of the given objects in the database. |
| """ |
| if batch_size is not None and batch_size <= 0: |
| raise ValueError("Batch size must be a positive integer.") |
| if not fields: |
| raise ValueError("Field names must be given to bulk_update().") |
| objs = tuple(objs) |
| if any(obj.pk is None for obj in objs): |
| raise ValueError("All bulk_update() objects must have a primary key set.") |
| fields = [self.model._meta.get_field(name) for name in fields] |
| if any(not f.concrete or f.many_to_many for f in fields): |
| raise ValueError("bulk_update() can only be used with concrete fields.") |
| if any(f.primary_key for f in fields): |
| raise ValueError("bulk_update() cannot be used with primary key fields.") |
| if not objs: |
| return 0 |
| for obj in objs: |
| obj._prepare_related_fields_for_save( |
| operation_name="bulk_update", fields=fields |
| ) |
| # PK is used twice in the resulting update query, once in the filter |
| # and once in the WHEN. Each field will also have one CAST. |
| self._for_write = True |
| connection = connections[self.db] |
| max_batch_size = connection.ops.bulk_batch_size(["pk", "pk"] + fields, objs) |
| batch_size = min(batch_size, max_batch_size) if batch_size else max_batch_size |
| requires_casting = connection.features.requires_casted_case_in_updates |
| batches = (objs[i : i + batch_size] for i in range(0, len(objs), batch_size)) |
| updates = [] |
| for batch_objs in batches: |
| update_kwargs = {} |
| for field in fields: |
| when_statements = [] |
| for obj in batch_objs: |
| attr = getattr(obj, field.attname) |
| if not hasattr(attr, "resolve_expression"): |
| attr = Value(attr, output_field=field) |
| when_statements.append(When(pk=obj.pk, then=attr)) |
| case_statement = Case(*when_statements, output_field=field) |
| if requires_casting: |
| case_statement = Cast(case_statement, output_field=field) |
| update_kwargs[field.attname] = case_statement |
| updates.append(([obj.pk for obj in batch_objs], update_kwargs)) |
| rows_updated = 0 |
| queryset = self.using(self.db) |
| with transaction.atomic(using=self.db, savepoint=False): |
| for pks, update_kwargs in updates: |
| rows_updated += queryset.filter(pk__in=pks).update(**update_kwargs) |
| return rows_updated |
|
|
| bulk_update.alters_data = True |
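|
| # Usage sketch (illustrative only, with a hypothetical Article model): |
| # mutate the instances in Python, then persist the changed columns with |
| # batched UPDATE ... CASE queries: |
| # |
| #     articles = list(Article.objects.filter(draft=True)) |
| #     for article in articles: |
| #         article.headline = article.headline.title() |
| #     Article.objects.bulk_update(articles, ["headline"], batch_size=100) |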
|
|
| async def abulk_update(self, objs, fields, batch_size=None): |
| return await sync_to_async(self.bulk_update)( |
| objs=objs, |
| fields=fields, |
| batch_size=batch_size, |
| ) |
|
|
| abulk_update.alters_data = True |
|
|
| def get_or_create(self, defaults=None, **kwargs): |
| """ |
| Look up an object with the given kwargs, creating one if necessary. |
| Return a tuple of (object, created), where created is a boolean |
| specifying whether an object was created. |
| """ |
| # The get() needs to be targeted at the write database in order |
| # to avoid potential transaction consistency problems. |
| self._for_write = True |
| try: |
| return self.get(**kwargs), False |
| except self.model.DoesNotExist: |
| params = self._extract_model_params(defaults, **kwargs) |
| # Try to create an object using passed params. |
| try: |
| with transaction.atomic(using=self.db): |
| params = dict(resolve_callables(params)) |
| return self.create(**params), True |
| except IntegrityError: |
| try: |
| return self.get(**kwargs), False |
| except self.model.DoesNotExist: |
| pass |
| raise |
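|
| # Usage sketch (illustrative only, with a hypothetical Tag model): the |
| # lookup kwargs identify the row; defaults only applies when a new row |
| # has to be created: |
| # |
| #     tag, created = Tag.objects.get_or_create( |
| #         slug="django", |
| #         defaults={"label": "Django"}, |
| #     ) |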
|
|
| async def aget_or_create(self, defaults=None, **kwargs): |
| return await sync_to_async(self.get_or_create)( |
| defaults=defaults, |
| **kwargs, |
| ) |
|
|
| def update_or_create(self, defaults=None, create_defaults=None, **kwargs): |
| """ |
| Look up an object with the given kwargs, updating one with defaults |
| if it exists, otherwise create a new one. Optionally, an object can |
| be created with different values than defaults by using |
| create_defaults. |
| Return a tuple (object, created), where created is a boolean |
| specifying whether an object was created. |
| """ |
| if create_defaults is None: |
| update_defaults = create_defaults = defaults or {} |
| else: |
| update_defaults = defaults or {} |
| self._for_write = True |
| with transaction.atomic(using=self.db): |
| # Lock the row so that a concurrent update is blocked until |
| # update_or_create() has performed its save. |
| obj, created = self.select_for_update().get_or_create( |
| create_defaults, **kwargs |
| ) |
| if created: |
| return obj, created |
| for k, v in resolve_callables(update_defaults): |
| setattr(obj, k, v) |
|
|
| update_fields = set(update_defaults) |
| concrete_field_names = self.model._meta._non_pk_concrete_field_names |
| # update_fields does not support non-concrete fields. |
| if concrete_field_names.issuperset(update_fields): |
| # Add fields which are set on pre_save(), e.g. auto_now fields. |
| # This is to maintain backward compatibility as these fields |
| # are not updated unless explicitly specified in the |
| # update_fields list. |
| for field in self.model._meta.local_concrete_fields: |
| if not ( |
| field.primary_key or field.__class__.pre_save is Field.pre_save |
| ): |
| update_fields.add(field.name) |
| if field.name != field.attname: |
| update_fields.add(field.attname) |
| obj.save(using=self.db, update_fields=update_fields) |
| else: |
| obj.save(using=self.db) |
| return obj, False |
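|
| # Usage sketch (illustrative only, with a hypothetical Counter model): |
| # defaults is applied when the row already exists, create_defaults (if |
| # given) when it has to be inserted: |
| # |
| #     counter, created = Counter.objects.update_or_create( |
| #         key="visits", |
| #         defaults={"hits": 10}, |
| #         create_defaults={"hits": 1}, |
| #     ) |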
|
|
| async def aupdate_or_create(self, defaults=None, create_defaults=None, **kwargs): |
| return await sync_to_async(self.update_or_create)( |
| defaults=defaults, |
| create_defaults=create_defaults, |
| **kwargs, |
| ) |
|
|
| def _extract_model_params(self, defaults, **kwargs): |
| """ |
| Prepare `params` for creating a model instance based on the given |
| kwargs; for use by get_or_create(). |
| """ |
| defaults = defaults or {} |
| params = {k: v for k, v in kwargs.items() if LOOKUP_SEP not in k} |
| params.update(defaults) |
| property_names = self.model._meta._property_names |
| invalid_params = [] |
| for param in params: |
| try: |
| self.model._meta.get_field(param) |
| except exceptions.FieldDoesNotExist: |
| # It's okay to use a model's property if it has a setter. |
| if not (param in property_names and getattr(self.model, param).fset): |
| invalid_params.append(param) |
| if invalid_params: |
| raise exceptions.FieldError( |
| "Invalid field name(s) for model %s: '%s'." |
| % ( |
| self.model._meta.object_name, |
| "', '".join(sorted(invalid_params)), |
| ) |
| ) |
| return params |
|
|
| def _earliest(self, *fields): |
| """ |
| Return the earliest object according to fields (if given) or by the |
| model's Meta.get_latest_by. |
| """ |
| if fields: |
| order_by = fields |
| else: |
| order_by = getattr(self.model._meta, "get_latest_by") |
| if order_by and not isinstance(order_by, (tuple, list)): |
| order_by = (order_by,) |
| if order_by is None: |
| raise ValueError( |
| "earliest() and latest() require either fields as positional " |
| "arguments or 'get_latest_by' in the model's Meta." |
| ) |
| obj = self._chain() |
| obj.query.set_limits(high=1) |
| obj.query.clear_ordering(force=True) |
| obj.query.add_ordering(*order_by) |
| return obj.get() |
|
|
| def earliest(self, *fields): |
| if self.query.is_sliced: |
| raise TypeError("Cannot change a query once a slice has been taken.") |
| return self._earliest(*fields) |
|
|
| async def aearliest(self, *fields): |
| return await sync_to_async(self.earliest)(*fields) |
|
|
| def latest(self, *fields): |
| """ |
| Return the latest object according to fields (if given) or by the |
| model's Meta.get_latest_by. |
| """ |
| if self.query.is_sliced: |
| raise TypeError("Cannot change a query once a slice has been taken.") |
| return self.reverse()._earliest(*fields) |
|
|
| async def alatest(self, *fields): |
| return await sync_to_async(self.latest)(*fields) |
|
|
| def first(self): |
| """Return the first object of a query or None if no match is found.""" |
| if self.ordered: |
| queryset = self |
| else: |
| self._check_ordering_first_last_queryset_aggregation(method="first") |
| queryset = self.order_by("pk") |
| for obj in queryset[:1]: |
| return obj |
|
|
| async def afirst(self): |
| return await sync_to_async(self.first)() |
|
|
| def last(self): |
| """Return the last object of a query or None if no match is found.""" |
| if self.ordered: |
| queryset = self.reverse() |
| else: |
| self._check_ordering_first_last_queryset_aggregation(method="last") |
| queryset = self.order_by("-pk") |
| for obj in queryset[:1]: |
| return obj |
|
|
| async def alast(self): |
| return await sync_to_async(self.last)() |
|
|
| def in_bulk(self, id_list=None, *, field_name="pk"): |
| """ |
| Return a dictionary mapping each of the given IDs to the object with |
| that ID. If `id_list` isn't provided, evaluate the entire QuerySet. |
| """ |
| if self.query.is_sliced: |
| raise TypeError("Cannot use 'limit' or 'offset' with in_bulk().") |
| opts = self.model._meta |
| unique_fields = [ |
| constraint.fields[0] |
| for constraint in opts.total_unique_constraints |
| if len(constraint.fields) == 1 |
| ] |
| if ( |
| field_name != "pk" |
| and not opts.get_field(field_name).unique |
| and field_name not in unique_fields |
| and self.query.distinct_fields != (field_name,) |
| ): |
| raise ValueError( |
| "in_bulk()'s field_name must be a unique field but %r isn't." |
| % field_name |
| ) |
| if id_list is not None: |
| if not id_list: |
| return {} |
| filter_key = "{}__in".format(field_name) |
| batch_size = connections[self.db].features.max_query_params |
| id_list = tuple(id_list) |
| # If the database has a limit on the number of query parameters |
| # (e.g. SQLite), retrieve the objects in batches if necessary. |
| if batch_size and batch_size < len(id_list): |
| qs = () |
| for offset in range(0, len(id_list), batch_size): |
| batch = id_list[offset : offset + batch_size] |
| qs += tuple(self.filter(**{filter_key: batch})) |
| else: |
| qs = self.filter(**{filter_key: id_list}) |
| else: |
| qs = self._chain() |
| return {getattr(obj, field_name): obj for obj in qs} |
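|
| # Usage sketch (illustrative only, with a hypothetical Product model |
| # that has a unique "sku" field): |
| # |
| #     Product.objects.in_bulk([1, 2])   # {1: <Product: 1>, 2: <Product: 2>} |
| #     Product.objects.in_bulk(field_name="sku")  # whole queryset keyed by sku |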
|
|
| async def ain_bulk(self, id_list=None, *, field_name="pk"): |
| return await sync_to_async(self.in_bulk)( |
| id_list=id_list, |
| field_name=field_name, |
| ) |
|
|
| def delete(self): |
| """Delete the records in the current QuerySet.""" |
| self._not_support_combined_queries("delete") |
| if self.query.is_sliced: |
| raise TypeError("Cannot use 'limit' or 'offset' with delete().") |
| if self.query.distinct_fields: |
| raise TypeError("Cannot call delete() after .distinct(*fields).") |
| if self._fields is not None: |
| raise TypeError("Cannot call delete() after .values() or .values_list()") |
|
|
| del_query = self._chain() |
|
|
| # The delete is actually 2 queries - one to find related objects, |
| # and one to delete. Make sure that the discovery of related |
| # objects is performed on the same database as the deletion. |
| del_query._for_write = True |
|
|
| # Disable fields and ordering that make no sense for a delete query. |
| del_query.query.select_for_update = False |
| del_query.query.select_related = False |
| del_query.query.clear_ordering(force=True) |
|
|
| collector = Collector(using=del_query.db, origin=self) |
| collector.collect(del_query) |
| deleted, _rows_count = collector.delete() |
|
|
| # Clear the result cache, in case this QuerySet gets reused. |
| self._result_cache = None |
| return deleted, _rows_count |
|
|
| delete.alters_data = True |
| delete.queryset_only = True |
|
|
| async def adelete(self): |
| return await sync_to_async(self.delete)() |
|
|
| adelete.alters_data = True |
| adelete.queryset_only = True |
|
|
| def _raw_delete(self, using): |
| """ |
| Delete objects found from the given queryset in single direct SQL |
| query. No signals are sent and there is no protection for cascades. |
| """ |
| query = self.query.clone() |
| query.__class__ = sql.DeleteQuery |
| cursor = query.get_compiler(using).execute_sql(CURSOR) |
| if cursor: |
| with cursor: |
| return cursor.rowcount |
| return 0 |
|
|
| _raw_delete.alters_data = True |
|
|
| def update(self, **kwargs): |
| """ |
| Update all elements in the current QuerySet, setting all the given |
| fields to the appropriate values. |
| """ |
| self._not_support_combined_queries("update") |
| if self.query.is_sliced: |
| raise TypeError("Cannot update a query once a slice has been taken.") |
| self._for_write = True |
| query = self.query.chain(sql.UpdateQuery) |
| query.add_update_values(kwargs) |
|
|
| # Inline annotations in order_by(), if possible. |
| new_order_by = [] |
| for col in query.order_by: |
| alias = col |
| descending = False |
| if isinstance(alias, str) and alias.startswith("-"): |
| alias = alias.removeprefix("-") |
| descending = True |
| if annotation := query.annotations.get(alias): |
| if getattr(annotation, "contains_aggregate", False): |
| raise exceptions.FieldError( |
| f"Cannot update when ordering by an aggregate: {annotation}" |
| ) |
| if descending: |
| annotation = annotation.desc() |
| new_order_by.append(annotation) |
| else: |
| new_order_by.append(col) |
| query.order_by = tuple(new_order_by) |
|
|
| # Clear any annotations so that they won't be present in subqueries. |
| query.annotations = {} |
| with transaction.mark_for_rollback_on_error(using=self.db): |
| rows = query.get_compiler(self.db).execute_sql(CURSOR) |
| self._result_cache = None |
| return rows |
|
|
| update.alters_data = True |
|
|
| async def aupdate(self, **kwargs): |
| return await sync_to_async(self.update)(**kwargs) |
|
|
| aupdate.alters_data = True |
|
|
| def _update(self, values): |
| """ |
| A version of update() that accepts field objects instead of field names. |
| Used primarily for model saving and not intended for use by general |
| code (it requires too much poking around at model internals to be |
| useful at that level). |
| """ |
| if self.query.is_sliced: |
| raise TypeError("Cannot update a query once a slice has been taken.") |
| query = self.query.chain(sql.UpdateQuery) |
| query.add_update_fields(values) |
| # Clear any annotations so that they won't be present in subqueries. |
| query.annotations = {} |
| self._result_cache = None |
| return query.get_compiler(self.db).execute_sql(CURSOR) |
|
|
| _update.alters_data = True |
| _update.queryset_only = False |
|
|
| def exists(self): |
| """ |
| Return True if the QuerySet would have any results, False otherwise. |
| """ |
| if self._result_cache is None: |
| return self.query.has_results(using=self.db) |
| return bool(self._result_cache) |
|
|
| async def aexists(self): |
| return await sync_to_async(self.exists)() |
|
|
| def contains(self, obj): |
| """ |
| Return True if the QuerySet contains the provided obj, |
| False otherwise. |
| """ |
| self._not_support_combined_queries("contains") |
| if self._fields is not None: |
| raise TypeError( |
| "Cannot call QuerySet.contains() after .values() or .values_list()." |
| ) |
| try: |
| if obj._meta.concrete_model != self.model._meta.concrete_model: |
| return False |
| except AttributeError: |
| raise TypeError("'obj' must be a model instance.") |
| if obj.pk is None: |
| raise ValueError("QuerySet.contains() cannot be used on unsaved objects.") |
| if self._result_cache is not None: |
| return obj in self._result_cache |
| return self.filter(pk=obj.pk).exists() |
|
|
| async def acontains(self, obj): |
| return await sync_to_async(self.contains)(obj=obj) |
|
|
| def _prefetch_related_objects(self): |
| # This method can only be called once the result cache has been filled. |
| prefetch_related_objects(self._result_cache, *self._prefetch_related_lookups) |
| self._prefetch_done = True |
|
|
| def explain(self, *, format=None, **options): |
| """ |
| Run an EXPLAIN on the SQL query this QuerySet would perform, and |
| return the results. |
| """ |
| return self.query.explain(using=self.db, format=format, **options) |
|
|
| async def aexplain(self, *, format=None, **options): |
| return await sync_to_async(self.explain)(format=format, **options) |
|
|
| ################################################## |
| # PUBLIC METHODS THAT RETURN A QUERYSET SUBCLASS # |
| ################################################## |
|
|
| def raw(self, raw_query, params=(), translations=None, using=None): |
| if using is None: |
| using = self.db |
| qs = RawQuerySet( |
| raw_query, |
| model=self.model, |
| params=params, |
| translations=translations, |
| using=using, |
| ) |
| qs._prefetch_related_lookups = self._prefetch_related_lookups[:] |
| return qs |
|
|
| def _values(self, *fields, **expressions): |
| clone = self._chain() |
| if expressions: |
| clone = clone.annotate(**expressions) |
| clone._fields = fields |
| clone.query.set_values(fields) |
| return clone |
|
|
| def values(self, *fields, **expressions): |
| fields += tuple(expressions) |
| clone = self._values(*fields, **expressions) |
| clone._iterable_class = ValuesIterable |
| return clone |
|
|
| def values_list(self, *fields, flat=False, named=False): |
| if flat and named: |
| raise TypeError("'flat' and 'named' can't be used together.") |
| if flat and len(fields) > 1: |
| raise TypeError( |
| "'flat' is not valid when values_list is called with more than one " |
| "field." |
| ) |
|
|
| field_names = {f for f in fields if not hasattr(f, "resolve_expression")} |
| _fields = [] |
| expressions = {} |
| counter = 1 |
| for field in fields: |
| if hasattr(field, "resolve_expression"): |
| field_id_prefix = getattr( |
| field, "default_alias", field.__class__.__name__.lower() |
| ) |
| while True: |
| field_id = field_id_prefix + str(counter) |
| counter += 1 |
| if field_id not in field_names: |
| break |
| expressions[field_id] = field |
| _fields.append(field_id) |
| else: |
| _fields.append(field) |
|
|
| clone = self._values(*_fields, **expressions) |
| clone._iterable_class = ( |
| NamedValuesListIterable |
| if named |
| else FlatValuesListIterable |
| if flat |
| else ValuesListIterable |
| ) |
| return clone |
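|
| # Usage sketch (illustrative only, with a hypothetical Entry model): |
| # |
| #     Entry.objects.values("id", "headline")        # dicts |
| #     Entry.objects.values_list("id", "headline")   # tuples |
| #     Entry.objects.values_list("id", flat=True)    # bare values |
| #     Entry.objects.values_list("id", named=True)   # Row(id=...) namedtuples |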
|
|
| def dates(self, field_name, kind, order="ASC"): |
| """ |
| Return a list of date objects representing all available dates for |
| the given field_name, scoped to 'kind'. |
| """ |
| if kind not in ("year", "month", "week", "day"): |
| raise ValueError("'kind' must be one of 'year', 'month', 'week', or 'day'.") |
| if order not in ("ASC", "DESC"): |
| raise ValueError("'order' must be either 'ASC' or 'DESC'.") |
| return ( |
| self.annotate( |
| datefield=Trunc(field_name, kind, output_field=DateField()), |
| plain_field=F(field_name), |
| ) |
| .values_list("datefield", flat=True) |
| .distinct() |
| .filter(plain_field__isnull=False) |
| .order_by(("-" if order == "DESC" else "") + "datefield") |
| ) |
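|
| # Usage sketch (illustrative only, with a hypothetical Entry model that |
| # has a "pub_date" field): one distinct, truncated date per bucket: |
| # |
| #     Entry.objects.dates("pub_date", "month")         # ascending months |
| #     Entry.objects.dates("pub_date", "year", "DESC")  # descending years |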
|
|
| def datetimes(self, field_name, kind, order="ASC", tzinfo=None): |
| """ |
| Return a list of datetime objects representing all available |
| datetimes for the given field_name, scoped to 'kind'. |
| """ |
| if kind not in ("year", "month", "week", "day", "hour", "minute", "second"): |
| raise ValueError( |
| "'kind' must be one of 'year', 'month', 'week', 'day', " |
| "'hour', 'minute', or 'second'." |
| ) |
| if order not in ("ASC", "DESC"): |
| raise ValueError("'order' must be either 'ASC' or 'DESC'.") |
| if settings.USE_TZ: |
| if tzinfo is None: |
| tzinfo = timezone.get_current_timezone() |
| else: |
| tzinfo = None |
| return ( |
| self.annotate( |
| datetimefield=Trunc( |
| field_name, |
| kind, |
| output_field=DateTimeField(), |
| tzinfo=tzinfo, |
| ), |
| plain_field=F(field_name), |
| ) |
| .values_list("datetimefield", flat=True) |
| .distinct() |
| .filter(plain_field__isnull=False) |
| .order_by(("-" if order == "DESC" else "") + "datetimefield") |
| ) |
|
|
| def none(self): |
| """Return an empty QuerySet.""" |
| clone = self._chain() |
| clone.query.set_empty() |
| return clone |
|
|
| ################################################################## |
| # PUBLIC METHODS THAT ALTER ATTRIBUTES AND RETURN A NEW QUERYSET # |
| ################################################################## |
|
|
| def all(self): |
| """ |
| Return a new QuerySet that is a copy of the current one. This allows a |
| QuerySet to proxy for a model manager in some cases. |
| """ |
| return self._chain() |
|
|
| def filter(self, *args, **kwargs): |
| """ |
| Return a new QuerySet instance with the args ANDed to the existing |
| set. |
| """ |
| self._not_support_combined_queries("filter") |
| return self._filter_or_exclude(False, args, kwargs) |
|
|
| def exclude(self, *args, **kwargs): |
| """ |
| Return a new QuerySet instance with NOT (args) ANDed to the existing |
| set. |
| """ |
| self._not_support_combined_queries("exclude") |
| return self._filter_or_exclude(True, args, kwargs) |
|
|
| def _filter_or_exclude(self, negate, args, kwargs): |
| if (args or kwargs) and self.query.is_sliced: |
| raise TypeError("Cannot filter a query once a slice has been taken.") |
| clone = self._chain() |
| if self._defer_next_filter: |
| self._defer_next_filter = False |
| clone._deferred_filter = negate, args, kwargs |
| else: |
| clone._filter_or_exclude_inplace(negate, args, kwargs) |
| return clone |
|
|
| def _filter_or_exclude_inplace(self, negate, args, kwargs): |
| if negate: |
| self._query.add_q(~Q(*args, **kwargs)) |
| else: |
| self._query.add_q(Q(*args, **kwargs)) |
|
|
| def complex_filter(self, filter_obj): |
| """ |
| Return a new QuerySet instance with filter_obj added to the filters. |
| |
| filter_obj can be a Q object or a dictionary of keyword lookup |
| arguments. |
| |
| This exists to support framework features such as 'limit_choices_to', |
| and usually it will be more natural to use other methods. |
| """ |
| if isinstance(filter_obj, Q): |
| clone = self._chain() |
| clone.query.add_q(filter_obj) |
| return clone |
| else: |
| return self._filter_or_exclude(False, args=(), kwargs=filter_obj) |
|
|
| def _combinator_query(self, combinator, *other_qs, all=False): |
| # Clone the query to inherit the select list and everything. |
| clone = self._chain() |
| # Clear limits and ordering so they can be reapplied. |
| clone.query.clear_ordering(force=True) |
| clone.query.clear_limits() |
| clone.query.combined_queries = (self.query,) + tuple( |
| qs.query for qs in other_qs |
| ) |
| clone.query.combinator = combinator |
| clone.query.combinator_all = all |
| return clone |
|
|
| def union(self, *other_qs, all=False): |
| # If the query is an EmptyQuerySet, combine all nonempty querysets. |
| if isinstance(self, EmptyQuerySet): |
| qs = [q for q in other_qs if not isinstance(q, EmptyQuerySet)] |
| if not qs: |
| return self |
| if len(qs) == 1: |
| return qs[0] |
| return qs[0]._combinator_query("union", *qs[1:], all=all) |
| return self._combinator_query("union", *other_qs, all=all) |
|
|
| def intersection(self, *other_qs): |
| # If any query is an EmptyQuerySet, return it. |
| if isinstance(self, EmptyQuerySet): |
| return self |
| for other in other_qs: |
| if isinstance(other, EmptyQuerySet): |
| return other |
| return self._combinator_query("intersection", *other_qs) |
|
|
| def difference(self, *other_qs): |
| # If the query is an EmptyQuerySet, return it. |
| if isinstance(self, EmptyQuerySet): |
| return self |
| return self._combinator_query("difference", *other_qs) |
|
|
| def select_for_update(self, nowait=False, skip_locked=False, of=(), no_key=False): |
| """ |
| Return a new QuerySet instance that will select objects with a |
| FOR UPDATE lock. |
| """ |
| if nowait and skip_locked: |
| raise ValueError("The nowait option cannot be used with skip_locked.") |
| obj = self._chain() |
| obj._for_write = True |
| obj.query.select_for_update = True |
| obj.query.select_for_update_nowait = nowait |
| obj.query.select_for_update_skip_locked = skip_locked |
| obj.query.select_for_update_of = of |
| obj.query.select_for_no_key_update = no_key |
| return obj |
|
|
| def select_related(self, *fields): |
| """ |
| Return a new QuerySet instance that will select related objects. |
| |
| If fields are specified, they must be ForeignKey fields and only those |
| related objects are included in the selection. |
| |
| If select_related(None) is called, clear the list. |
| """ |
| self._not_support_combined_queries("select_related") |
| if self._fields is not None: |
| raise TypeError( |
| "Cannot call select_related() after .values() or .values_list()" |
| ) |
|
|
| obj = self._chain() |
| if fields == (None,): |
| obj.query.select_related = False |
| elif fields: |
| obj.query.add_select_related(fields) |
| else: |
| obj.query.select_related = True |
| return obj |
|
|
| def prefetch_related(self, *lookups): |
| """ |
| Return a new QuerySet instance that will prefetch the specified |
| Many-To-One and Many-To-Many related objects when the QuerySet is |
| evaluated. |
| |
| When prefetch_related() is called more than once, append to the list of |
| prefetch lookups. If prefetch_related(None) is called, clear the list. |
| """ |
| self._not_support_combined_queries("prefetch_related") |
| clone = self._chain() |
| if lookups == (None,): |
| clone._prefetch_related_lookups = () |
| else: |
| for lookup in lookups: |
| if isinstance(lookup, Prefetch): |
| lookup = lookup.prefetch_to |
| lookup = lookup.split(LOOKUP_SEP, 1)[0] |
| if lookup in self.query._filtered_relations: |
| raise ValueError( |
| "prefetch_related() is not supported with FilteredRelation." |
| ) |
| clone._prefetch_related_lookups = clone._prefetch_related_lookups + lookups |
| return clone |
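|
| # Usage sketch (illustrative only, with hypothetical Book/Author |
| # models): select_related() follows foreign keys with SQL JOINs in the |
| # same query; prefetch_related() runs one extra query per lookup and |
| # joins the results in Python: |
| # |
| #     Book.objects.select_related("publisher")   # FK, one query |
| #     Book.objects.prefetch_related("authors")   # M2M, two queries |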
|
|
| def annotate(self, *args, **kwargs): |
| """ |
| Return a query set in which the returned objects have been annotated |
| with extra data or aggregations. |
| """ |
| self._not_support_combined_queries("annotate") |
| return self._annotate(args, kwargs, select=True) |
|
|
| def alias(self, *args, **kwargs): |
| """ |
| Return a query set with added aliases for extra data or aggregations. |
| """ |
| self._not_support_combined_queries("alias") |
| return self._annotate(args, kwargs, select=False) |
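|
| # Usage sketch (illustrative only, with a hypothetical Author model): |
| # annotate() selects the computed value onto each instance; alias() |
| # only makes it available to later filter()/order_by() calls: |
| # |
| #     from django.db.models import Count |
| #     Author.objects.annotate(num_books=Count("book")) |
| #     Author.objects.alias(num_books=Count("book")).filter(num_books__gt=5) |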
|
|
| def _annotate(self, args, kwargs, select=True): |
| self._validate_values_are_expressions( |
| args + tuple(kwargs.values()), method_name="annotate" |
| ) |
| annotations = {} |
| for arg in args: |
| # The default_alias property may raise a TypeError. |
| try: |
| if arg.default_alias in kwargs: |
| raise ValueError( |
| "The named annotation '%s' conflicts with the " |
| "default name for another annotation." % arg.default_alias |
| ) |
| except TypeError: |
| raise TypeError("Complex annotations require an alias") |
| annotations[arg.default_alias] = arg |
| annotations.update(kwargs) |
|
|
| clone = self._chain() |
| names = self._fields |
| if names is None: |
| names = set( |
| chain.from_iterable( |
| (field.name, field.attname) |
| if hasattr(field, "attname") |
| else (field.name,) |
| for field in self.model._meta.get_fields() |
| ) |
| ) |
|
|
| for alias, annotation in annotations.items(): |
| if alias in names: |
| raise ValueError( |
| "The annotation '%s' conflicts with a field on " |
| "the model." % alias |
| ) |
| if isinstance(annotation, FilteredRelation): |
| clone.query.add_filtered_relation(annotation, alias) |
| else: |
| clone.query.add_annotation( |
| annotation, |
| alias, |
| select=select, |
| ) |
| for alias, annotation in clone.query.annotations.items(): |
| if alias in annotations and annotation.contains_aggregate: |
| if clone._fields is None: |
| clone.query.group_by = True |
| else: |
| clone.query.set_group_by() |
| break |
|
|
| return clone |
|
|
| def order_by(self, *field_names): |
| """Return a new QuerySet instance with the ordering changed.""" |
| if self.query.is_sliced: |
| raise TypeError("Cannot reorder a query once a slice has been taken.") |
| obj = self._chain() |
| obj.query.clear_ordering(force=True, clear_default=False) |
| obj.query.add_ordering(*field_names) |
| return obj |
|
|
| def distinct(self, *field_names): |
| """ |
| Return a new QuerySet instance that will select only distinct results. |
| """ |
| self._not_support_combined_queries("distinct") |
| if self.query.is_sliced: |
| raise TypeError( |
| "Cannot create distinct fields once a slice has been taken." |
| ) |
| obj = self._chain() |
| obj.query.add_distinct_fields(*field_names) |
| return obj |
|
|
| def extra( |
| self, |
| select=None, |
| where=None, |
| params=None, |
| tables=None, |
| order_by=None, |
| select_params=None, |
| ): |
| """Add extra SQL fragments to the query.""" |
| self._not_support_combined_queries("extra") |
| if self.query.is_sliced: |
| raise TypeError("Cannot change a query once a slice has been taken.") |
| clone = self._chain() |
| clone.query.add_extra(select, select_params, where, params, tables, order_by) |
| return clone |
|
|
| def reverse(self): |
| """Reverse the ordering of the QuerySet.""" |
| if self.query.is_sliced: |
| raise TypeError("Cannot reverse a query once a slice has been taken.") |
| clone = self._chain() |
| clone.query.standard_ordering = not clone.query.standard_ordering |
| return clone |
|
|
| def defer(self, *fields): |
| """ |
| Defer the loading of data for certain fields until they are accessed. |
| Add the set of deferred fields to any existing set of deferred fields. |
| The only exception to this is if None is passed in as the only |
| parameter, in which case all deferrals are removed. |
| """ |
| self._not_support_combined_queries("defer") |
| if self._fields is not None: |
| raise TypeError("Cannot call defer() after .values() or .values_list()") |
| clone = self._chain() |
| if fields == (None,): |
| clone.query.clear_deferred_loading() |
| else: |
| clone.query.add_deferred_loading(fields) |
| return clone |
|
|
| def only(self, *fields): |
| """ |
| Essentially, the opposite of defer(). Only the fields passed into this |
| method and that are not already specified as deferred are loaded |
| immediately when the queryset is evaluated. |
| """ |
| self._not_support_combined_queries("only") |
| if self._fields is not None: |
| raise TypeError("Cannot call only() after .values() or .values_list()") |
| if fields == (None,): |
| # Can only pass None to defer(), not only(), as the rest option. |
| # That won't stop people trying to do this, so let's be explicit. |
| raise TypeError("Cannot pass None as an argument to only().") |
| for field in fields: |
| field = field.split(LOOKUP_SEP, 1)[0] |
| if field in self.query._filtered_relations: |
| raise ValueError("only() is not supported with FilteredRelation.") |
| clone = self._chain() |
| clone.query.add_immediate_loading(fields) |
| return clone |
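|
| # Usage sketch (illustrative only, with a hypothetical Report model): |
| # defer() postpones loading the named columns until attribute access; |
| # only() is its complement: |
| # |
| #     Report.objects.defer("body")   # load everything except body |
| #     Report.objects.only("title")   # load only title (plus the pk) |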
|
|
| def using(self, alias): |
| """Select which database this QuerySet should execute against.""" |
| clone = self._chain() |
| clone._db = alias |
| return clone |
|
|
| ################################### |
| # PUBLIC INTROSPECTION ATTRIBUTES # |
| ################################### |
|
|
| @property |
| def ordered(self): |
| """ |
| Return True if the QuerySet is ordered -- i.e. has an order_by() |
| clause or a default ordering on the model (or is empty). |
| """ |
| if isinstance(self, EmptyQuerySet): |
| return True |
| if self.query.extra_order_by or self.query.order_by: |
| return True |
| elif ( |
| self.query.default_ordering |
| and self.query.get_meta().ordering |
| and |
| # A default ordering doesn't affect GROUP BY queries. |
| not self.query.group_by |
| ): |
| return True |
| else: |
| return False |
|
|
| @property |
| def db(self): |
| """Return the database used if this query is executed now.""" |
| if self._for_write: |
| return self._db or router.db_for_write(self.model, **self._hints) |
| return self._db or router.db_for_read(self.model, **self._hints) |
|
|
| ################### |
| # PRIVATE METHODS # |
| ################### |
|
|
| def _insert( |
| self, |
| objs, |
| fields, |
| returning_fields=None, |
| raw=False, |
| using=None, |
| on_conflict=None, |
| update_fields=None, |
| unique_fields=None, |
| ): |
| """ |
| Insert a new record for the given model. This provides an interface to |
| the InsertQuery class and is how Model.save() is implemented. |
| """ |
| self._for_write = True |
| if using is None: |
| using = self.db |
| query = sql.InsertQuery( |
| self.model, |
| on_conflict=on_conflict, |
| update_fields=update_fields, |
| unique_fields=unique_fields, |
| ) |
| query.insert_values(fields, objs, raw=raw) |
| return query.get_compiler(using=using).execute_sql(returning_fields) |
|
|
| _insert.alters_data = True |
| _insert.queryset_only = False |
|
|
| def _batched_insert( |
| self, |
| objs, |
| fields, |
| batch_size, |
| on_conflict=None, |
| update_fields=None, |
| unique_fields=None, |
| ): |
| """ |
| Helper method for bulk_create() to insert objs one batch at a time. |
| """ |
| connection = connections[self.db] |
| ops = connection.ops |
| max_batch_size = max(ops.bulk_batch_size(fields, objs), 1) |
| batch_size = min(batch_size, max_batch_size) if batch_size else max_batch_size |
| inserted_rows = [] |
| bulk_return = connection.features.can_return_rows_from_bulk_insert |
| for item in [objs[i : i + batch_size] for i in range(0, len(objs), batch_size)]: |
| if bulk_return and ( |
| on_conflict is None or on_conflict == OnConflict.UPDATE |
| ): |
| inserted_rows.extend( |
| self._insert( |
| item, |
| fields=fields, |
| using=self.db, |
| on_conflict=on_conflict, |
| update_fields=update_fields, |
| unique_fields=unique_fields, |
| returning_fields=self.model._meta.db_returning_fields, |
| ) |
| ) |
| else: |
| self._insert( |
| item, |
| fields=fields, |
| using=self.db, |
| on_conflict=on_conflict, |
| update_fields=update_fields, |
| unique_fields=unique_fields, |
| ) |
| return inserted_rows |
|
|
| def _chain(self): |
| """ |
| Return a copy of the current QuerySet that's ready for another |
| operation. |
| """ |
| obj = self._clone() |
| if obj._sticky_filter: |
| obj.query.filter_is_sticky = True |
| obj._sticky_filter = False |
| return obj |
|
|
| def _clone(self): |
| """ |
| Return a copy of the current QuerySet. A lightweight alternative |
| to deepcopy(). |
| """ |
| c = self.__class__( |
| model=self.model, |
| query=self.query.chain(), |
| using=self._db, |
| hints=self._hints, |
| ) |
| c._sticky_filter = self._sticky_filter |
| c._for_write = self._for_write |
| c._prefetch_related_lookups = self._prefetch_related_lookups[:] |
| c._known_related_objects = self._known_related_objects |
| c._iterable_class = self._iterable_class |
| c._fields = self._fields |
| return c |
|
|
| def _fetch_all(self): |
| if self._result_cache is None: |
| self._result_cache = list(self._iterable_class(self)) |
| if self._prefetch_related_lookups and not self._prefetch_done: |
| self._prefetch_related_objects() |
|
|
| def _next_is_sticky(self): |
| """ |
| Indicate that the next filter call and the one following that should |
| be treated as a single filter. This is only important when it comes to |
| determining when to reuse tables for many-to-many filters. Required so |
| that we can filter naturally on the results of related managers. |
| |
| This doesn't return a clone of the current QuerySet (it returns |
| "self"). The method is only used internally and should be immediately |
| followed by a filter() that does create a clone. |
| """ |
| self._sticky_filter = True |
| return self |
|
|
| def _merge_sanity_check(self, other): |
| """Check that two QuerySet classes may be merged.""" |
| if self._fields is not None and ( |
| set(self.query.values_select) != set(other.query.values_select) |
| or set(self.query.extra_select) != set(other.query.extra_select) |
| or set(self.query.annotation_select) != set(other.query.annotation_select) |
| ): |
| raise TypeError( |
| "Merging '%s' classes must involve the same values in each case." |
| % self.__class__.__name__ |
| ) |
|
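| # Illustrative trigger (hypothetical ``Author`` model): combining values() |
| # querysets that select different fields fails this check: |
| # |
| #     qs1 = Author.objects.values("name") |
| #     qs2 = Author.objects.values("name", "age") |
| #     qs1 | qs2  # TypeError: Merging 'QuerySet' classes must involve ... |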
|
| def _merge_known_related_objects(self, other): |
| """ |
| Keep track of all known related objects from either QuerySet instance. |
| """ |
| for field, objects in other._known_related_objects.items(): |
| self._known_related_objects.setdefault(field, {}).update(objects) |
|
|
| def resolve_expression(self, *args, **kwargs): |
| if self._fields and len(self._fields) > 1: |
| # values() querysets can only be used as nested queries if they are |
| # set up to select only a single field. |
| raise TypeError("Cannot use multi-field values as a filter value.") |
| query = self.query.resolve_expression(*args, **kwargs) |
| query._db = self._db |
| return query |
|
|
| resolve_expression.queryset_only = True |
|
|
| def _add_hints(self, **hints): |
| """ |
| Update hinting information for use by routers. Add new key/values or |
| overwrite existing key/values. |
| """ |
| self._hints.update(hints) |
|
|
| def _has_filters(self): |
| """ |
| Check if this QuerySet has any filtering going on. This isn't |
| equivalent to checking if all objects are present in the results; for |
| example, qs[1:]._has_filters() -> False. |
| """ |
| return self.query.has_filters() |
|
|
| @staticmethod |
| def _validate_values_are_expressions(values, method_name): |
| invalid_args = sorted( |
| str(arg) for arg in values if not hasattr(arg, "resolve_expression") |
| ) |
| if invalid_args: |
| raise TypeError( |
| "QuerySet.%s() received non-expression(s): %s." |
| % ( |
| method_name, |
| ", ".join(invalid_args), |
| ) |
| ) |
|
|
| def _not_support_combined_queries(self, operation_name): |
| if self.query.combinator: |
| raise NotSupportedError( |
| "Calling QuerySet.%s() after %s() is not supported." |
| % (operation_name, self.query.combinator) |
| ) |
|
|
| def _check_operator_queryset(self, other, operator_): |
| if self.query.combinator or other.query.combinator: |
| raise TypeError(f"Cannot use {operator_} operator with combined queryset.") |
|
|
| def _check_ordering_first_last_queryset_aggregation(self, method): |
| if isinstance(self.query.group_by, tuple) and not any( |
| col.output_field is self.model._meta.pk for col in self.query.group_by |
| ): |
| raise TypeError( |
| f"Cannot use QuerySet.{method}() on an unordered queryset performing " |
| f"aggregation. Add an ordering with order_by()." |
| ) |
|
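| # Illustrative trigger (hypothetical model and fields, Count from |
| # django.db.models): an aggregated queryset grouped by something other |
| # than the pk has no deterministic "first" row: |
| # |
| #     Author.objects.values("country").annotate(n=Count("pk")).first() |
| # |
| # raises the TypeError above; adding .order_by("country") satisfies it. |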
|
|
|
| class InstanceCheckMeta(type): |
| def __instancecheck__(self, instance): |
| return isinstance(instance, QuerySet) and instance.query.is_empty() |
|
|
|
|
| class EmptyQuerySet(metaclass=InstanceCheckMeta): |
| """ |
| Marker class for checking whether a queryset is empty via .none(): |
| isinstance(qs.none(), EmptyQuerySet) -> True |
| """ |
|
|
| def __init__(self, *args, **kwargs): |
| raise TypeError("EmptyQuerySet can't be instantiated") |
|
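| # Usage sketch (``Author`` is a stand-in model): the metaclass makes the |
| # marker usable even though no instance can ever exist: |
| # |
| #     isinstance(Author.objects.none(), EmptyQuerySet)  # True |
| #     isinstance(Author.objects.all(), EmptyQuerySet)   # False |
| #     EmptyQuerySet()                                   # TypeError |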
|
|
|
| class RawQuerySet: |
| """ |
| Provide an iterator which converts the results of raw SQL queries into |
| annotated model instances. |
| """ |
|
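| # Typical entry point (hedged example; ``Author`` and the column names are |
| # stand-ins). Model.objects.raw() constructs one of these: |
| # |
| #     qs = Author.objects.raw( |
| #         "SELECT id, name AS author_name FROM myapp_author", |
| #         translations={"author_name": "name"}, |
| #     ) |
| # |
| # ``translations`` maps query column names to model field names when the |
| # SQL aliases don't match the fields (see ``columns`` below). |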
|
| def __init__( |
| self, |
| raw_query, |
| model=None, |
| query=None, |
| params=(), |
| translations=None, |
| using=None, |
| hints=None, |
| ): |
| self.raw_query = raw_query |
| self.model = model |
| self._db = using |
| self._hints = hints or {} |
| self.query = query or sql.RawQuery(sql=raw_query, using=self.db, params=params) |
| self.params = params |
| self.translations = translations or {} |
| self._result_cache = None |
| self._prefetch_related_lookups = () |
| self._prefetch_done = False |
|
|
| def resolve_model_init_order(self): |
| """Resolve the init field names and value positions.""" |
| converter = connections[self.db].introspection.identifier_converter |
| model_init_fields = [ |
| f for f in self.model._meta.fields if converter(f.column) in self.columns |
| ] |
| annotation_fields = [ |
| (column, pos) |
| for pos, column in enumerate(self.columns) |
| if column not in self.model_fields |
| ] |
| model_init_order = [ |
| self.columns.index(converter(f.column)) for f in model_init_fields |
| ] |
| model_init_names = [f.attname for f in model_init_fields] |
| return model_init_names, model_init_order, annotation_fields |
|
|
| def prefetch_related(self, *lookups): |
| """Same as QuerySet.prefetch_related()""" |
| clone = self._clone() |
| if lookups == (None,): |
| clone._prefetch_related_lookups = () |
| else: |
| clone._prefetch_related_lookups = clone._prefetch_related_lookups + lookups |
| return clone |
|
|
| def _prefetch_related_objects(self): |
| prefetch_related_objects(self._result_cache, *self._prefetch_related_lookups) |
| self._prefetch_done = True |
|
|
| def _clone(self): |
| """Same as QuerySet._clone()""" |
| c = self.__class__( |
| self.raw_query, |
| model=self.model, |
| query=self.query, |
| params=self.params, |
| translations=self.translations, |
| using=self._db, |
| hints=self._hints, |
| ) |
| c._prefetch_related_lookups = self._prefetch_related_lookups[:] |
| return c |
|
|
| def _fetch_all(self): |
| if self._result_cache is None: |
| self._result_cache = list(self.iterator()) |
| if self._prefetch_related_lookups and not self._prefetch_done: |
| self._prefetch_related_objects() |
|
|
| def __len__(self): |
| self._fetch_all() |
| return len(self._result_cache) |
|
|
| def __bool__(self): |
| self._fetch_all() |
| return bool(self._result_cache) |
|
|
| def __iter__(self): |
| self._fetch_all() |
| return iter(self._result_cache) |
|
|
| def __aiter__(self): |
| # Remember, __aiter__ itself is synchronous; it's the generator it |
| # returns that is async. |
| async def generator(): |
| await sync_to_async(self._fetch_all)() |
| for item in self._result_cache: |
| yield item |
|
|
| return generator() |
|
|
| def iterator(self): |
| yield from RawModelIterable(self) |
|
|
| def __repr__(self): |
| return "<%s: %s>" % (self.__class__.__name__, self.query) |
|
|
| def __getitem__(self, k): |
| return list(self)[k] |
|
|
| @property |
| def db(self): |
| """Return the database used if this query is executed now.""" |
| return self._db or router.db_for_read(self.model, **self._hints) |
|
|
| def using(self, alias): |
| """Select the database this RawQuerySet should execute against.""" |
| return RawQuerySet( |
| self.raw_query, |
| model=self.model, |
| query=self.query.chain(using=alias), |
| params=self.params, |
| translations=self.translations, |
| using=alias, |
| ) |
|
|
| @cached_property |
| def columns(self): |
| """ |
| A list of model field names in the order they'll appear in the |
| query results. |
| """ |
| columns = self.query.get_columns() |
| # Adjust any column names which don't match field names. |
| for query_name, model_name in self.translations.items(): |
| # Ignore translations for nonexistent column names. |
| try: |
| index = columns.index(query_name) |
| except ValueError: |
| pass |
| else: |
| columns[index] = model_name |
| return columns |
|
|
| @cached_property |
| def model_fields(self): |
| """A dict mapping column names to model field names.""" |
| converter = connections[self.db].introspection.identifier_converter |
| model_fields = {} |
| for field in self.model._meta.fields: |
| name, column = field.get_attname_column() |
| model_fields[converter(column)] = field |
| return model_fields |
|
|
|
|
| class Prefetch: |
| def __init__(self, lookup, queryset=None, to_attr=None): |
| # `prefetch_through` is the path we traverse to perform the prefetch. |
| self.prefetch_through = lookup |
| # `prefetch_to` is the path to the attribute that stores the result. |
| self.prefetch_to = lookup |
| if queryset is not None and ( |
| isinstance(queryset, RawQuerySet) |
| or ( |
| hasattr(queryset, "_iterable_class") |
| and not issubclass(queryset._iterable_class, ModelIterable) |
| ) |
| ): |
| raise ValueError( |
| "Prefetch querysets cannot use raw(), values(), and values_list()." |
| ) |
| if to_attr: |
| self.prefetch_to = LOOKUP_SEP.join( |
| lookup.split(LOOKUP_SEP)[:-1] + [to_attr] |
| ) |
|
|
| self.queryset = queryset |
| self.to_attr = to_attr |
|
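| # Example split (hypothetical ``Book`` queryset): |
| # |
| #     Prefetch( |
| #         "author__books", |
| #         queryset=Book.objects.filter(year__gte=2000), |
| #         to_attr="recent_books", |
| #     ) |
| # |
| # yields prefetch_through == "author__books" (the traversal path) and |
| # prefetch_to == "author__recent_books" (where the results are stored). |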
|
| def __getstate__(self): |
| obj_dict = self.__dict__.copy() |
| if self.queryset is not None: |
| queryset = self.queryset._chain() |
| # Prevent the QuerySet from being evaluated. |
| queryset._result_cache = [] |
| queryset._prefetch_done = True |
| obj_dict["queryset"] = queryset |
| return obj_dict |
|
|
| def add_prefix(self, prefix): |
| self.prefetch_through = prefix + LOOKUP_SEP + self.prefetch_through |
| self.prefetch_to = prefix + LOOKUP_SEP + self.prefetch_to |
|
|
| def get_current_prefetch_to(self, level): |
| return LOOKUP_SEP.join(self.prefetch_to.split(LOOKUP_SEP)[: level + 1]) |
|
|
| def get_current_to_attr(self, level): |
| parts = self.prefetch_to.split(LOOKUP_SEP) |
| to_attr = parts[level] |
| as_attr = self.to_attr and level == len(parts) - 1 |
| return to_attr, as_attr |
|
|
| def get_current_queryset(self, level): |
| if self.get_current_prefetch_to(level) == self.prefetch_to: |
| return self.queryset |
| return None |
|
|
| def __eq__(self, other): |
| if not isinstance(other, Prefetch): |
| return NotImplemented |
| return self.prefetch_to == other.prefetch_to |
|
|
| def __hash__(self): |
| return hash((self.__class__, self.prefetch_to)) |
|
|
|
|
| def normalize_prefetch_lookups(lookups, prefix=None): |
| """Normalize lookups into Prefetch objects.""" |
| ret = [] |
| for lookup in lookups: |
| if not isinstance(lookup, Prefetch): |
| lookup = Prefetch(lookup) |
| if prefix: |
| lookup.add_prefix(prefix) |
| ret.append(lookup) |
| return ret |
|
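| # For example, normalize_prefetch_lookups(["books"], prefix="authors") |
| # returns [Prefetch("authors__books")]: bare strings are wrapped in |
| # Prefetch objects and the prefix is applied via add_prefix(). |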
|
|
|
| def prefetch_related_objects(model_instances, *related_lookups): |
| """ |
| Populate prefetched object caches for a list of model instances based on |
| the lookups/Prefetch instances given. |
| """ |
| if not model_instances: |
| return |
|
|
| # We need to be able to dynamically add to the list of prefetch_related |
| # lookups that we look up (see below). So we need some bookkeeping to |
| # ensure we don't do duplicate work. |
| done_queries = {}  # dictionary of things like 'foo__bar': [results] |
|
|
| auto_lookups = set()  # we add to this as we go through. |
| followed_descriptors = set()  # recursion protection |
|
|
| all_lookups = normalize_prefetch_lookups(reversed(related_lookups)) |
| while all_lookups: |
| lookup = all_lookups.pop() |
| if lookup.prefetch_to in done_queries: |
| if lookup.queryset is not None: |
| raise ValueError( |
| "'%s' lookup was already seen with a different queryset. " |
| "You may need to adjust the ordering of your lookups." |
| % lookup.prefetch_to |
| ) |
|
|
| continue |
|
|
| # At the top level, the list of objects to decorate is the result cache |
| # from the primary QuerySet. It won't be for deeper levels. |
| obj_list = model_instances |
|
|
| through_attrs = lookup.prefetch_through.split(LOOKUP_SEP) |
| for level, through_attr in enumerate(through_attrs): |
| # Prepare the main instances. |
| if not obj_list: |
| break |
|
|
| prefetch_to = lookup.get_current_prefetch_to(level) |
| if prefetch_to in done_queries: |
| # Skip any prefetching, and any object preparation. |
| obj_list = done_queries[prefetch_to] |
| continue |
|
|
| # Prepare objects: |
| good_objects = True |
| for obj in obj_list: |
| # Since prefetching can reuse instances, it is possible to have the |
| # same instance multiple times in obj_list, so obj might already be |
| # prepared. |
| if not hasattr(obj, "_prefetched_objects_cache"): |
| try: |
| obj._prefetched_objects_cache = {} |
| except (AttributeError, TypeError): |
| # Must be an immutable object from values_list(flat=True), for |
| # example (TypeError), or a QuerySet subclass that isn't returning |
| # Model instances (AttributeError), either in Django or a 3rd |
| # party. prefetch_related() doesn't make sense, so quit. |
| good_objects = False |
| break |
| if not good_objects: |
| break |
|
|
| # Descend down the tree. |
|
|
| # We assume the objects retrieved are homogeneous (which is the premise |
| # of prefetch_related), so what applies to the first object applies to all. |
| first_obj = obj_list[0] |
| to_attr = lookup.get_current_to_attr(level)[0] |
| prefetcher, descriptor, attr_found, is_fetched = get_prefetcher( |
| first_obj, through_attr, to_attr |
| ) |
|
|
| if not attr_found: |
| raise AttributeError( |
| "Cannot find '%s' on %s object, '%s' is an invalid " |
| "parameter to prefetch_related()" |
| % ( |
| through_attr, |
| first_obj.__class__.__name__, |
| lookup.prefetch_through, |
| ) |
| ) |
|
|
| if level == len(through_attrs) - 1 and prefetcher is None: |
| # Last one, this *must* resolve to something that supports |
| # prefetching, otherwise there is no point adding it and the |
| # developer asking for it has made a mistake. |
| raise ValueError( |
| "'%s' does not resolve to an item that supports " |
| "prefetching - this is an invalid parameter to " |
| "prefetch_related()." % lookup.prefetch_through |
| ) |
|
|
| obj_to_fetch = None |
| if prefetcher is not None: |
| obj_to_fetch = [obj for obj in obj_list if not is_fetched(obj)] |
|
|
| if obj_to_fetch: |
| obj_list, additional_lookups = prefetch_one_level( |
| obj_to_fetch, |
| prefetcher, |
| lookup, |
| level, |
| ) |
| # We need to ensure we don't keep adding lookups from the |
| # same relationships to stop infinite recursion. So, if we |
| # are already on an automatically added lookup, don't add |
| # the new lookups from relationships we've seen already. |
| if not ( |
| prefetch_to in done_queries |
| and lookup in auto_lookups |
| and descriptor in followed_descriptors |
| ): |
| done_queries[prefetch_to] = obj_list |
| new_lookups = normalize_prefetch_lookups( |
| reversed(additional_lookups), prefetch_to |
| ) |
| auto_lookups.update(new_lookups) |
| all_lookups.extend(new_lookups) |
| followed_descriptors.add(descriptor) |
| else: |
| # Either a singly related object that has already been fetched |
| # (e.g. via select_related), or hopefully some other property |
| # that doesn't support prefetching but needs to be traversed. |
|
| # We replace the current list of parent objects with the list |
| # of related objects, filtering out empty or missing values so |
| # that we can continue with nullable or reverse relations. |
| new_obj_list = [] |
| for obj in obj_list: |
| if through_attr in getattr(obj, "_prefetched_objects_cache", ()): |
| # If related objects have been prefetched, use the cache rather |
| # than the object's through_attr. |
| new_obj = list(obj._prefetched_objects_cache.get(through_attr)) |
| else: |
| try: |
| new_obj = getattr(obj, through_attr) |
| except exceptions.ObjectDoesNotExist: |
| continue |
| if new_obj is None: |
| continue |
| # We special-case `list` rather than something more generic like |
| # `Iterable` because we don't want to accidentally match user |
| # models that define __iter__. |
| if isinstance(new_obj, list): |
| new_obj_list.extend(new_obj) |
| else: |
| new_obj_list.append(new_obj) |
| obj_list = new_obj_list |
|
|
|
|
| def get_prefetcher(instance, through_attr, to_attr): |
| """ |
| For the attribute 'through_attr' on the given instance, find |
| an object that has a get_prefetch_queryset(). |
| Return a 4-tuple containing: |
| (the object with get_prefetch_queryset (or None), |
| the descriptor object representing this relationship (or None), |
| a boolean that is False if the attribute was not found at all, |
| a function that takes an instance and returns a boolean that is True if |
| the attribute has already been fetched for that instance) |
| """ |
|
|
| def has_to_attr_attribute(instance): |
| return hasattr(instance, to_attr) |
|
|
| prefetcher = None |
| is_fetched = has_to_attr_attribute |
|
|
| # For singly related objects, we have to avoid getting the attribute |
| # from the object, as this will trigger the query. So we first try |
| # the class, in order to get the descriptor object. |
| rel_obj_descriptor = getattr(instance.__class__, through_attr, None) |
| if rel_obj_descriptor is None: |
| attr_found = hasattr(instance, through_attr) |
| else: |
| attr_found = True |
| if rel_obj_descriptor: |
| # Singly related object: the descriptor object itself has the |
| # get_prefetch_queryset() method. |
| if hasattr(rel_obj_descriptor, "get_prefetch_queryset"): |
| prefetcher = rel_obj_descriptor |
| is_fetched = rel_obj_descriptor.is_cached |
| else: |
| # The descriptor doesn't support prefetching, so we go ahead and |
| # get the attribute on the instance rather than the class to |
| # support many-related managers. |
| rel_obj = getattr(instance, through_attr) |
| if hasattr(rel_obj, "get_prefetch_queryset"): |
| prefetcher = rel_obj |
| if through_attr != to_attr: |
| # Special-case cached_property instances because hasattr would |
| # trigger attribute computation and assignment. |
| if isinstance( |
| getattr(instance.__class__, to_attr, None), cached_property |
| ): |
|
|
| def has_cached_property(instance): |
| return to_attr in instance.__dict__ |
|
|
| is_fetched = has_cached_property |
| else: |
|
|
| def in_prefetched_cache(instance): |
| return through_attr in instance._prefetched_objects_cache |
|
|
| is_fetched = in_prefetched_cache |
| return prefetcher, rel_obj_descriptor, attr_found, is_fetched |
|
|
|
|
| def prefetch_one_level(instances, prefetcher, lookup, level): |
| """ |
| Helper function for prefetch_related_objects(). |
| |
| Run prefetches on all instances using the prefetcher object, |
| assigning results to relevant caches in instance. |
| |
| Return the prefetched objects along with any additional prefetches that |
| must be done due to prefetch_related lookups found from default managers. |
| """ |
| # The prefetcher must have a method get_prefetch_queryset() which takes |
| # a list of instances, and returns a tuple: |
|
| # (queryset of instances of self.model that are related to the passed-in |
| #  instances, |
| #  callable that gets the value to be matched for returned instances, |
| #  callable that gets the value to be matched for passed-in instances, |
| #  boolean that is True for singly related objects, |
| #  cache or field name to assign the results to, |
| #  boolean that is True when the previous argument is a cache name |
| #  rather than a field name). |
|
| # The 'values to be matched' must be hashable as they will be used in a |
| # dictionary lookup. |
|
| ( |
| rel_qs, |
| rel_obj_attr, |
| instance_attr, |
| single, |
| cache_name, |
| is_descriptor, |
| ) = prefetcher.get_prefetch_queryset(instances, lookup.get_current_queryset(level)) |
| # We have to handle the possibility that the QuerySet we just got back |
| # contains some prefetch_related lookups. We don't want to trigger the |
| # prefetch_related functionality by evaluating the query. Rather, we |
| # need to merge in the prefetch_related lookups. |
| # Copy the lookups in case it is a Prefetch object which could be reused |
| # later (happens in nested prefetch_related). |
| additional_lookups = [ |
| copy.copy(additional_lookup) |
| for additional_lookup in getattr(rel_qs, "_prefetch_related_lookups", ()) |
| ] |
| if additional_lookups: |
| # There's no need to clone, because the manager should have given us a |
| # fresh instance, so access the internal attribute instead of the |
| # public interface for performance reasons. |
| rel_qs._prefetch_related_lookups = () |
|
|
| all_related_objects = list(rel_qs) |
|
|
| rel_obj_cache = {} |
| for rel_obj in all_related_objects: |
| rel_attr_val = rel_obj_attr(rel_obj) |
| rel_obj_cache.setdefault(rel_attr_val, []).append(rel_obj) |
|
|
| to_attr, as_attr = lookup.get_current_to_attr(level) |
| # Make sure `to_attr` does not conflict with a field. |
| if as_attr and instances: |
| # We assume the objects retrieved are homogeneous (which is the premise |
| # of prefetch_related), so what applies to the first object applies to all. |
| model = instances[0].__class__ |
| try: |
| model._meta.get_field(to_attr) |
| except exceptions.FieldDoesNotExist: |
| pass |
| else: |
| msg = "to_attr={} conflicts with a field on the {} model." |
| raise ValueError(msg.format(to_attr, model.__name__)) |
|
|
| # Whether or not we're prefetching the last part of the lookup. |
| leaf = len(lookup.prefetch_through.split(LOOKUP_SEP)) - 1 == level |
|
|
| for obj in instances: |
| instance_attr_val = instance_attr(obj) |
| vals = rel_obj_cache.get(instance_attr_val, []) |
|
|
| if single: |
| val = vals[0] if vals else None |
| if as_attr: |
| # A to_attr has been given for the prefetch. |
| setattr(obj, to_attr, val) |
| elif is_descriptor: |
| # cache_name points to a field name in obj. This field is a |
| # descriptor for a related object. |
| setattr(obj, cache_name, val) |
| else: |
| # No to_attr has been given for this prefetch operation and the |
| # cache_name does not point to a descriptor. Store the value of |
| # the field in the object's field cache. |
| obj._state.fields_cache[cache_name] = val |
| else: |
| if as_attr: |
| setattr(obj, to_attr, vals) |
| else: |
| manager = getattr(obj, to_attr) |
| if leaf and lookup.queryset is not None: |
| qs = manager._apply_rel_filters(lookup.queryset) |
| else: |
| qs = manager.get_queryset() |
| qs._result_cache = vals |
| # We don't want the individual qs doing prefetch_related now, |
| # since we have merged this into the current work. |
| qs._prefetch_done = True |
| obj._prefetched_objects_cache[cache_name] = qs |
| return all_related_objects, additional_lookups |
|
|
|
|
| class RelatedPopulator: |
| """ |
| RelatedPopulator is used for select_related() object instantiation. |
| |
| The idea is that each select_related() model will be populated by a |
| different RelatedPopulator instance. The RelatedPopulator instances get |
| klass_info and select (computed in SQLCompiler) plus the used db as |
| input for initialization. That data is used to compute which columns |
| to use, how to instantiate the model, and how to populate the links |
| between the objects. |
| |
| The actual creation of the objects is done in populate() method. This |
| method gets row and from_obj as input and populates the select_related() |
| model instance. |
| """ |
|
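| # Shape sketch of klass_info, inferred from its usage in __init__ below |
| # (not a public contract): |
| # |
| #     {"model": Book, "select_fields": [3, 4, 5], "from_parent": False, |
| #      "local_setter": Field.set_cached_value, "remote_setter": ..., |
| #      "related_klass_infos": [...]} |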
|
| def __init__(self, klass_info, select, db): |
| self.db = db |
| # Pre-compute needed attributes. The attributes are: |
| #  - model_cls: the possibly deferred model class to instantiate |
| #  - either: |
| #    - cols_start, cols_end: usually the columns in the row are |
| #      in the same order model_cls.__init__ expects them, so we |
| #      can instantiate by model_cls(*row[cols_start:cols_end]) |
| #    - reorder_for_init: When select_related descends to a child |
| #      class, then we want select_related to populate the child |
| #      class' fields rather than the parent's. In this case we |
| #      have to do the extra work of adjusting the columns to be |
| #      in the order model_cls.__init__ expects them. The |
| #      reorder_for_init attribute contains a suitable callable |
| #      for this. |
| #  - pk_idx: the index of the primary key field in the reordered |
| #    model data. Used to check if a related object exists at all. |
| #  - init_list: the field attnames fetched from the database. For |
| #    deferred models this isn't the same as all attnames of the |
| #    model's fields. |
| #  - related_populators: a list of RelatedPopulator instances if |
| #    the select_related() query descends to related models from |
| #    this model. |
| #  - local_setter, remote_setter: Methods to set cached values on |
| #    the object being populated and on the remote object. Usually |
| #    these are Field.set_cached_value() methods. |
| select_fields = klass_info["select_fields"] |
| from_parent = klass_info["from_parent"] |
| if not from_parent: |
| self.cols_start = select_fields[0] |
| self.cols_end = select_fields[-1] + 1 |
| self.init_list = [ |
| f[0].target.attname for f in select[self.cols_start : self.cols_end] |
| ] |
| self.reorder_for_init = None |
| else: |
| attname_indexes = { |
| select[idx][0].target.attname: idx for idx in select_fields |
| } |
| model_init_attnames = ( |
| f.attname for f in klass_info["model"]._meta.concrete_fields |
| ) |
| self.init_list = [ |
| attname for attname in model_init_attnames if attname in attname_indexes |
| ] |
| self.reorder_for_init = operator.itemgetter( |
| *[attname_indexes[attname] for attname in self.init_list] |
| ) |
|
|
| self.model_cls = klass_info["model"] |
| self.pk_idx = self.init_list.index(self.model_cls._meta.pk.attname) |
| self.related_populators = get_related_populators(klass_info, select, self.db) |
| self.local_setter = klass_info["local_setter"] |
| self.remote_setter = klass_info["remote_setter"] |
|
|
| def populate(self, row, from_obj): |
| if self.reorder_for_init: |
| obj_data = self.reorder_for_init(row) |
| else: |
| obj_data = row[self.cols_start : self.cols_end] |
| if obj_data[self.pk_idx] is None: |
| obj = None |
| else: |
| obj = self.model_cls.from_db(self.db, self.init_list, obj_data) |
| for rel_iter in self.related_populators: |
| rel_iter.populate(row, obj) |
| self.local_setter(from_obj, obj) |
| if obj is not None: |
| self.remote_setter(obj, from_obj) |
|
|
|
|
| def get_related_populators(klass_info, select, db): |
| iterators = [] |
| related_klass_infos = klass_info.get("related_klass_infos", []) |
| for rel_klass_info in related_klass_infos: |
| rel_cls = RelatedPopulator(rel_klass_info, select, db) |
| iterators.append(rel_cls) |
| return iterators |
|
|